2024-11-23 08:21:37,839 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5b7a7f33 2024-11-23 08:21:37,864 main DEBUG Took 0.021199 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-23 08:21:37,864 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-23 08:21:37,865 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-23 08:21:37,866 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-23 08:21:37,869 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 08:21:37,884 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-23 08:21:37,906 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,909 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 08:21:37,910 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,911 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 08:21:37,912 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,912 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 08:21:37,914 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,914 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 08:21:37,915 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,916 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 08:21:37,917 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,918 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 08:21:37,919 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,920 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-23 08:21:37,921 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,922 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 08:21:37,923 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,923 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 08:21:37,924 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,925 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 08:21:37,926 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,926 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 08:21:37,927 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,928 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 08:21:37,930 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,931 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-23 08:21:37,937 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 08:21:37,939 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-23 08:21:37,943 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-23 08:21:37,952 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-23 08:21:37,956 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-23 08:21:37,956 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-23 08:21:37,975 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-23 08:21:37,982 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-23 08:21:37,993 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-23 08:21:37,994 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-23 08:21:37,995 main DEBUG createAppenders(={Console}) 2024-11-23 08:21:37,999 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5b7a7f33 initialized 2024-11-23 08:21:38,001 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5b7a7f33 2024-11-23 08:21:38,001 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5b7a7f33 OK. 2024-11-23 08:21:38,003 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-23 08:21:38,004 main DEBUG OutputStream closed 2024-11-23 08:21:38,006 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-23 08:21:38,006 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-23 08:21:38,007 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@593aaf41 OK 2024-11-23 08:21:38,171 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-23 08:21:38,174 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-23 08:21:38,176 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-23 08:21:38,177 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-23 08:21:38,178 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-23 08:21:38,178 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-23 08:21:38,179 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-23 08:21:38,179 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-23 08:21:38,180 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-23 08:21:38,180 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-23 08:21:38,181 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-23 08:21:38,181 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-23 08:21:38,182 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-23 08:21:38,182 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-23 08:21:38,183 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-23 08:21:38,183 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-23 08:21:38,183 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-23 08:21:38,184 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-23 08:21:38,191 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-23 08:21:38,192 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@4331d187) with optional ClassLoader: null 2024-11-23 08:21:38,192 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-23 08:21:38,193 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@4331d187] started OK. 2024-11-23T08:21:38,246 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-11-23 08:21:38,251 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-23 08:21:38,251 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-23T08:21:38,797 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1 2024-11-23T08:21:38,799 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-11-23T08:21:38,911 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-11-23T08:21:39,324 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T08:21:39,367 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52, deleteOnExit=true 2024-11-23T08:21:39,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T08:21:39,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/test.cache.data in system properties and HBase conf 2024-11-23T08:21:39,371 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T08:21:39,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir in system properties and HBase conf 2024-11-23T08:21:39,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T08:21:39,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T08:21:39,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T08:21:39,498 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-23T08:21:39,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T08:21:39,506 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T08:21:39,512 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T08:21:39,513 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T08:21:39,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T08:21:39,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T08:21:39,519 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T08:21:39,522 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T08:21:39,523 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T08:21:39,523 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/nfs.dump.dir in system properties and HBase conf 2024-11-23T08:21:39,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/java.io.tmpdir in system properties and HBase conf 2024-11-23T08:21:39,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T08:21:39,530 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T08:21:39,531 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T08:21:40,987 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-23T08:21:41,114 INFO [Time-limited test {}] log.Log(170): Logging initialized @4681ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-23T08:21:41,240 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T08:21:41,363 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T08:21:41,434 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T08:21:41,434 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T08:21:41,436 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T08:21:41,476 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T08:21:41,481 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@654c02d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,AVAILABLE} 2024-11-23T08:21:41,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cd6ab6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T08:21:41,807 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12351f7e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/java.io.tmpdir/jetty-localhost-37141-hadoop-hdfs-3_4_1-tests_jar-_-any-5601468738707058497/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T08:21:41,817 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:37141} 2024-11-23T08:21:41,817 INFO [Time-limited test {}] server.Server(415): Started @5386ms 2024-11-23T08:21:42,485 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T08:21:42,495 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T08:21:42,499 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T08:21:42,499 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T08:21:42,500 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T08:21:42,503 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4521559e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,AVAILABLE} 2024-11-23T08:21:42,504 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3622d218{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T08:21:42,626 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74bb782c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/java.io.tmpdir/jetty-localhost-33705-hadoop-hdfs-3_4_1-tests_jar-_-any-7630009942200912411/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T08:21:42,627 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:33705} 2024-11-23T08:21:42,627 INFO [Time-limited test {}] server.Server(415): Started @6196ms 2024-11-23T08:21:42,688 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T08:21:42,905 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T08:21:42,916 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T08:21:42,955 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T08:21:42,956 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T08:21:42,956 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T08:21:42,958 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32a76e2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,AVAILABLE} 2024-11-23T08:21:42,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1de9333b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T08:21:43,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@8fd4906{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/java.io.tmpdir/jetty-localhost-40381-hadoop-hdfs-3_4_1-tests_jar-_-any-5795281389178429889/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T08:21:43,117 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:40381} 2024-11-23T08:21:43,118 INFO [Time-limited test {}] server.Server(415): Started @6687ms 2024-11-23T08:21:43,123 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T08:21:43,214 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T08:21:43,223 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T08:21:43,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T08:21:43,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T08:21:43,228 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T08:21:43,229 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2de28195{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,AVAILABLE} 2024-11-23T08:21:43,230 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@108f4b55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T08:21:43,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@474673d3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/java.io.tmpdir/jetty-localhost-39875-hadoop-hdfs-3_4_1-tests_jar-_-any-12828322588782167297/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T08:21:43,363 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:39875} 2024-11-23T08:21:43,364 INFO [Time-limited test {}] server.Server(415): Started @6933ms 2024-11-23T08:21:43,366 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-23T08:21:44,495 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1/current/BP-1830007029-172.17.0.2-1732350100447/current, will proceed with Du for space computation calculation, 2024-11-23T08:21:44,495 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2/current/BP-1830007029-172.17.0.2-1732350100447/current, will proceed with Du for space computation calculation, 2024-11-23T08:21:44,495 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4/current/BP-1830007029-172.17.0.2-1732350100447/current, will proceed with Du for space computation calculation, 2024-11-23T08:21:44,501 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data3/current/BP-1830007029-172.17.0.2-1732350100447/current, will proceed with Du for space computation calculation, 2024-11-23T08:21:44,552 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T08:21:44,552 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T08:21:44,583 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5/current/BP-1830007029-172.17.0.2-1732350100447/current, will proceed with Du for space computation calculation, 2024-11-23T08:21:44,587 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6/current/BP-1830007029-172.17.0.2-1732350100447/current, will proceed with Du for space computation calculation, 2024-11-23T08:21:44,622 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x372d6c5a16662313 with lease ID 0x5867b60dda36a755: Processing first storage report for DS-ece71f5d-bd97-40ff-a109-3f66859df621 from datanode DatanodeRegistration(127.0.0.1:42473, datanodeUuid=45c86a91-b353-4da8-bbc2-31f7404968bc, infoPort=42885, infoSecurePort=0, ipcPort=44749, storageInfo=lv=-57;cid=testClusterID;nsid=707131011;c=1732350100447) 2024-11-23T08:21:44,624 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x372d6c5a16662313 with lease ID 0x5867b60dda36a755: from storage DS-ece71f5d-bd97-40ff-a109-3f66859df621 node DatanodeRegistration(127.0.0.1:42473, datanodeUuid=45c86a91-b353-4da8-bbc2-31f7404968bc, infoPort=42885, infoSecurePort=0, ipcPort=44749, storageInfo=lv=-57;cid=testClusterID;nsid=707131011;c=1732350100447), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-23T08:21:44,624 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14e70a4ac2f87daa with lease ID 0x5867b60dda36a754: Processing first storage report for DS-afc355f0-91cd-42de-9fa7-4bdaaae077e0 from datanode DatanodeRegistration(127.0.0.1:37117, datanodeUuid=f41c14d4-e902-43cc-9e0d-b02b9537cf74, infoPort=42215, infoSecurePort=0, ipcPort=33243, storageInfo=lv=-57;cid=testClusterID;nsid=707131011;c=1732350100447) 2024-11-23T08:21:44,624 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14e70a4ac2f87daa with lease ID 0x5867b60dda36a754: from storage DS-afc355f0-91cd-42de-9fa7-4bdaaae077e0 node DatanodeRegistration(127.0.0.1:37117, datanodeUuid=f41c14d4-e902-43cc-9e0d-b02b9537cf74, infoPort=42215, infoSecurePort=0, ipcPort=33243, storageInfo=lv=-57;cid=testClusterID;nsid=707131011;c=1732350100447), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T08:21:44,625 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x372d6c5a16662313 with lease ID 0x5867b60dda36a755: Processing first storage report for DS-af18c57d-ca09-486b-b61e-8d9187ffd4c8 from datanode DatanodeRegistration(127.0.0.1:42473, datanodeUuid=45c86a91-b353-4da8-bbc2-31f7404968bc, infoPort=42885, infoSecurePort=0, ipcPort=44749, storageInfo=lv=-57;cid=testClusterID;nsid=707131011;c=1732350100447) 2024-11-23T08:21:44,625 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x372d6c5a16662313 with lease ID 0x5867b60dda36a755: from storage DS-af18c57d-ca09-486b-b61e-8d9187ffd4c8 node DatanodeRegistration(127.0.0.1:42473, datanodeUuid=45c86a91-b353-4da8-bbc2-31f7404968bc, 
infoPort=42885, infoSecurePort=0, ipcPort=44749, storageInfo=lv=-57;cid=testClusterID;nsid=707131011;c=1732350100447), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T08:21:44,625 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14e70a4ac2f87daa with lease ID 0x5867b60dda36a754: Processing first storage report for DS-8f5a1ac2-a21d-4f7d-a57d-2bd18e1c0bed from datanode DatanodeRegistration(127.0.0.1:37117, datanodeUuid=f41c14d4-e902-43cc-9e0d-b02b9537cf74, infoPort=42215, infoSecurePort=0, ipcPort=33243, storageInfo=lv=-57;cid=testClusterID;nsid=707131011;c=1732350100447) 2024-11-23T08:21:44,625 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14e70a4ac2f87daa with lease ID 0x5867b60dda36a754: from storage DS-8f5a1ac2-a21d-4f7d-a57d-2bd18e1c0bed node DatanodeRegistration(127.0.0.1:37117, datanodeUuid=f41c14d4-e902-43cc-9e0d-b02b9537cf74, infoPort=42215, infoSecurePort=0, ipcPort=33243, storageInfo=lv=-57;cid=testClusterID;nsid=707131011;c=1732350100447), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T08:21:44,627 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T08:21:44,640 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfd120d4c1ba715c with lease ID 0x5867b60dda36a756: Processing first storage report for DS-68784e7f-be71-4c4d-94f4-e7d5c5342c58 from datanode DatanodeRegistration(127.0.0.1:41035, datanodeUuid=8691d9da-dea3-44e2-98c0-b3ac4322e349, infoPort=42593, infoSecurePort=0, ipcPort=38549, storageInfo=lv=-57;cid=testClusterID;nsid=707131011;c=1732350100447) 2024-11-23T08:21:44,640 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfd120d4c1ba715c with lease ID 0x5867b60dda36a756: from storage DS-68784e7f-be71-4c4d-94f4-e7d5c5342c58 node DatanodeRegistration(127.0.0.1:41035, datanodeUuid=8691d9da-dea3-44e2-98c0-b3ac4322e349, infoPort=42593, infoSecurePort=0, ipcPort=38549, storageInfo=lv=-57;cid=testClusterID;nsid=707131011;c=1732350100447), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T08:21:44,640 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfd120d4c1ba715c with lease ID 0x5867b60dda36a756: Processing first storage report for DS-56689eb0-abbc-49a9-bff2-b5c153ae5f5b from datanode DatanodeRegistration(127.0.0.1:41035, datanodeUuid=8691d9da-dea3-44e2-98c0-b3ac4322e349, infoPort=42593, infoSecurePort=0, ipcPort=38549, storageInfo=lv=-57;cid=testClusterID;nsid=707131011;c=1732350100447) 2024-11-23T08:21:44,641 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfd120d4c1ba715c with lease ID 0x5867b60dda36a756: from storage DS-56689eb0-abbc-49a9-bff2-b5c153ae5f5b node DatanodeRegistration(127.0.0.1:41035, datanodeUuid=8691d9da-dea3-44e2-98c0-b3ac4322e349, infoPort=42593, infoSecurePort=0, ipcPort=38549, storageInfo=lv=-57;cid=testClusterID;nsid=707131011;c=1732350100447), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T08:21:44,709 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1 2024-11-23T08:21:44,803 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/zookeeper_0, clientPort=60357, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T08:21:44,815 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60357 2024-11-23T08:21:44,830 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T08:21:44,834 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T08:21:45,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741825_1001 (size=7) 2024-11-23T08:21:45,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741825_1001 (size=7) 2024-11-23T08:21:45,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741825_1001 (size=7) 2024-11-23T08:21:45,602 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 with version=8 2024-11-23T08:21:45,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/hbase-staging 2024-11-23T08:21:45,729 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-23T08:21:45,994 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b33d9013c60a:0 server-side Connection retries=45 2024-11-23T08:21:46,004 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T08:21:46,005 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T08:21:46,010 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T08:21:46,010 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T08:21:46,010 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T08:21:46,187 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T08:21:46,252 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-23T08:21:46,264 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-23T08:21:46,268 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T08:21:46,294 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 103998 (auto-detected) 2024-11-23T08:21:46,296 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-23T08:21:46,328 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38501 2024-11-23T08:21:46,359 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38501 connecting to ZooKeeper ensemble=127.0.0.1:60357 2024-11-23T08:21:46,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:385010x0, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T08:21:46,509 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38501-0x1016812678f0000 connected 2024-11-23T08:21:46,625 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T08:21:46,630 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T08:21:46,645 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T08:21:46,650 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8, hbase.cluster.distributed=false 2024-11-23T08:21:46,690 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T08:21:46,697 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38501 2024-11-23T08:21:46,698 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38501 2024-11-23T08:21:46,698 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, 
numCallQueues=1, port=38501 2024-11-23T08:21:46,699 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38501 2024-11-23T08:21:46,699 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38501 2024-11-23T08:21:46,849 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b33d9013c60a:0 server-side Connection retries=45 2024-11-23T08:21:46,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T08:21:46,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T08:21:46,851 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T08:21:46,852 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T08:21:46,852 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T08:21:46,855 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T08:21:46,859 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T08:21:46,860 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45199 2024-11-23T08:21:46,863 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45199 connecting to ZooKeeper ensemble=127.0.0.1:60357 2024-11-23T08:21:46,864 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T08:21:46,868 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T08:21:46,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:451990x0, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T08:21:46,897 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45199-0x1016812678f0001 connected 2024-11-23T08:21:46,898 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T08:21:46,902 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T08:21:46,913 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, 
evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T08:21:46,917 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T08:21:46,927 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T08:21:46,932 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45199 2024-11-23T08:21:46,933 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45199 2024-11-23T08:21:46,937 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45199 2024-11-23T08:21:46,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45199 2024-11-23T08:21:46,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45199 2024-11-23T08:21:46,963 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b33d9013c60a:0 server-side Connection retries=45 2024-11-23T08:21:46,963 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T08:21:46,963 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T08:21:46,964 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T08:21:46,964 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T08:21:46,964 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T08:21:46,964 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T08:21:46,967 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T08:21:46,969 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43417 2024-11-23T08:21:46,971 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43417 connecting to ZooKeeper ensemble=127.0.0.1:60357 2024-11-23T08:21:46,972 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T08:21:46,975 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T08:21:47,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:434170x0, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T08:21:47,022 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:434170x0, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T08:21:47,023 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T08:21:47,023 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43417-0x1016812678f0002 connected 2024-11-23T08:21:47,026 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T08:21:47,027 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T08:21:47,030 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T08:21:47,031 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43417 2024-11-23T08:21:47,031 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43417 2024-11-23T08:21:47,031 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43417 2024-11-23T08:21:47,032 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43417 2024-11-23T08:21:47,032 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43417 2024-11-23T08:21:47,050 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b33d9013c60a:0 server-side Connection retries=45 2024-11-23T08:21:47,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T08:21:47,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T08:21:47,050 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T08:21:47,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T08:21:47,051 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T08:21:47,051 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T08:21:47,051 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T08:21:47,054 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39649 2024-11-23T08:21:47,056 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39649 connecting to ZooKeeper ensemble=127.0.0.1:60357 2024-11-23T08:21:47,058 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T08:21:47,060 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T08:21:47,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:396490x0, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T08:21:47,074 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:396490x0, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T08:21:47,075 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T08:21:47,075 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39649-0x1016812678f0003 connected 2024-11-23T08:21:47,080 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T08:21:47,081 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T08:21:47,087 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T08:21:47,095 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39649 2024-11-23T08:21:47,095 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39649 2024-11-23T08:21:47,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39649 2024-11-23T08:21:47,100 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39649 2024-11-23T08:21:47,100 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39649 2024-11-23T08:21:47,127 DEBUG [M:0;b33d9013c60a:38501 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b33d9013c60a:38501 2024-11-23T08:21:47,130 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b33d9013c60a,38501,1732350105792 2024-11-23T08:21:47,146 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T08:21:47,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T08:21:47,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T08:21:47,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T08:21:47,149 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b33d9013c60a,38501,1732350105792 2024-11-23T08:21:47,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T08:21:47,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T08:21:47,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:47,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:47,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:47,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T08:21:47,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:47,190 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T08:21:47,191 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b33d9013c60a,38501,1732350105792 from backup master directory 2024-11-23T08:21:47,204 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T08:21:47,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b33d9013c60a,38501,1732350105792 2024-11-23T08:21:47,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T08:21:47,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T08:21:47,205 WARN [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T08:21:47,205 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b33d9013c60a,38501,1732350105792 2024-11-23T08:21:47,208 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-23T08:21:47,209 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-23T08:21:47,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T08:21:47,284 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/hbase.id] with ID: 7c4199b0-a333-4032-bd3c-7e578b25122d 2024-11-23T08:21:47,284 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.tmp/hbase.id 2024-11-23T08:21:47,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741826_1002 (size=42) 2024-11-23T08:21:47,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741826_1002 (size=42) 2024-11-23T08:21:47,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741826_1002 (size=42) 2024-11-23T08:21:47,306 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.tmp/hbase.id]:[hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/hbase.id] 2024-11-23T08:21:47,375 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added 
intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T08:21:47,380 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T08:21:47,406 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 23ms. 2024-11-23T08:21:47,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:47,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:47,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:47,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:47,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741827_1003 (size=196) 2024-11-23T08:21:47,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741827_1003 (size=196) 2024-11-23T08:21:47,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741827_1003 (size=196) 2024-11-23T08:21:47,468 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:21:47,471 INFO 
[master/b33d9013c60a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-23T08:21:47,488 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:262) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:231) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
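The NoSuchMethodException trace above is the expected outcome of a capability probe, not a failure: the SASL helper uses reflection to look for a DFSClient method that only exists on Hadoop versions without HDFS-12396, and falls back to the newer transparent-crypto path when it is absent. A minimal, self-contained sketch of that probe pattern (illustrative only; class and method names are taken from the trace, the surrounding code is not HBase's):

import java.lang.reflect.Method;

public class CryptoHelperProbe {
    public static void main(String[] args) {
        try {
            // Probe for the pre-HDFS-12396 private method; its absence selects the newer helper.
            Class<?> dfsClient = Class.forName("org.apache.hadoop.hdfs.DFSClient");
            Method m = dfsClient.getDeclaredMethod(
                "decryptEncryptedDataEncryptionKey",
                Class.forName("org.apache.hadoop.fs.FileEncryptionInfo"));
            System.out.println("pre-HDFS-12396 helper selected: " + m);
        } catch (ClassNotFoundException | NoSuchMethodException e) {
            // Expected on Hadoop 3.3+ (and when Hadoop is absent from the classpath entirely):
            // log at DEBUG and use the post-HDFS-12396 crypto helper, as the trace shows.
            System.out.println("falling back to post-HDFS-12396 helper: " + e);
        }
    }
}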
2024-11-23T08:21:47,493 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T08:21:47,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741828_1004 (size=1189) 2024-11-23T08:21:47,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741828_1004 (size=1189) 2024-11-23T08:21:47,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741828_1004 (size=1189) 2024-11-23T08:21:47,558 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/data/master/store 2024-11-23T08:21:47,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741829_1005 (size=34) 2024-11-23T08:21:47,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741829_1005 (size=34) 2024-11-23T08:21:47,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741829_1005 (size=34) 2024-11-23T08:21:47,596 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
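For readers who find the descriptor dump above hard to scan, the same column-family layout can be expressed with the public HBase client API. This is only an illustrative sketch: master:store is created internally by the master, not through user code, and the table name below is a hypothetical stand-in. Only the 'info' family is shown; proc, rs and state follow the same pattern with 1 version, no data-block encoding and 64 KB blocks.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
    public static void main(String[] args) {
        // 'info' family as logged: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter,
        // kept in memory, 8 KB block size.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "store")) // hypothetical name, not master:store
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build())
            .build();
        System.out.println(td);
    }
}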
2024-11-23T08:21:47,599 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:21:47,601 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T08:21:47,601 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T08:21:47,601 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T08:21:47,603 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T08:21:47,604 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T08:21:47,604 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T08:21:47,607 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732350107601Disabling compacts and flushes for region at 1732350107601Disabling writes for close at 1732350107604 (+3 ms)Writing region close event to WAL at 1732350107604Closed at 1732350107604 2024-11-23T08:21:47,609 WARN [master/b33d9013c60a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/data/master/store/.initializing 2024-11-23T08:21:47,610 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/WALs/b33d9013c60a,38501,1732350105792 2024-11-23T08:21:47,620 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T08:21:47,645 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b33d9013c60a%2C38501%2C1732350105792, suffix=, logDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/WALs/b33d9013c60a,38501,1732350105792, archiveDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/oldWALs, maxLogs=10 2024-11-23T08:21:47,678 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/WALs/b33d9013c60a,38501,1732350105792/b33d9013c60a%2C38501%2C1732350105792.1732350107652, exclude list is [], retry=0 2024-11-23T08:21:47,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42473,DS-ece71f5d-bd97-40ff-a109-3f66859df621,DISK] 
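The 'WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=10' line above reflects the usual WAL sizing knobs. As a hedged illustration, the standard region-server keys below produce those numbers (rollsize = blocksize x multiplier); whether the master's local-store WAL applies extra overrides of its own is not visible in this log, so treat the mapping as an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalSizingSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 256 MB WAL block size on HDFS; roll when the log reaches blocksize * multiplier = 128 MB.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Keep at most 10 WAL files before forcing flushes of the lagging regions.
        conf.setInt("hbase.regionserver.maxlogs", 10);
        System.out.println("maxlogs=" + conf.get("hbase.regionserver.maxlogs"));
    }
}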
2024-11-23T08:21:47,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41035,DS-68784e7f-be71-4c4d-94f4-e7d5c5342c58,DISK] 2024-11-23T08:21:47,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37117,DS-afc355f0-91cd-42de-9fa7-4bdaaae077e0,DISK] 2024-11-23T08:21:47,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-23T08:21:47,771 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/WALs/b33d9013c60a,38501,1732350105792/b33d9013c60a%2C38501%2C1732350105792.1732350107652 2024-11-23T08:21:47,772 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42885:42885),(127.0.0.1/127.0.0.1:42215:42215),(127.0.0.1/127.0.0.1:42593:42593)] 2024-11-23T08:21:47,773 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T08:21:47,773 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:21:47,781 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T08:21:47,782 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T08:21:47,837 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T08:21:47,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T08:21:47,881 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:47,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T08:21:47,887 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T08:21:47,894 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T08:21:47,895 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:47,896 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:21:47,897 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T08:21:47,909 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T08:21:47,909 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:47,912 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:21:47,913 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T08:21:47,921 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T08:21:47,921 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:47,923 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:21:47,924 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T08:21:47,929 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T08:21:47,931 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T08:21:47,938 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T08:21:47,939 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T08:21:47,945 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
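The FlushLargeStoresPolicy line above is simple arithmetic: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset, the per-family lower bound defaults to the region memstore flush size divided by the number of column families, here 128 MB / 4 (info, proc, rs, state) = 32 MB, matching the logged '(32.0 M)'. A throwaway sketch of that calculation:

public class FlushLowerBoundSketch {
    public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // default hbase.hregion.memstore.flush.size
        int columnFamilies = 4;                      // info, proc, rs, state
        long perFamilyLowerBound = memstoreFlushSize / columnFamilies;
        System.out.println((perFamilyLowerBound / (1024 * 1024)) + " MB"); // prints 32 MB
    }
}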
2024-11-23T08:21:47,949 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T08:21:47,955 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:21:47,957 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70987006, jitterRate=0.05778881907463074}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T08:21:47,968 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732350107802Initializing all the Stores at 1732350107805 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732350107806 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350107807 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350107807Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350107807Cleaning up temporary data from old regions at 1732350107940 (+133 ms)Region opened successfully at 1732350107968 (+28 ms) 2024-11-23T08:21:47,969 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T08:21:48,009 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4488c207, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b33d9013c60a/172.17.0.2:0 2024-11-23T08:21:48,045 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-23T08:21:48,056 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T08:21:48,056 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T08:21:48,059 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T08:21:48,060 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-23T08:21:48,066 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-23T08:21:48,066 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T08:21:48,093 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T08:21:48,103 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T08:21:48,162 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T08:21:48,164 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T08:21:48,166 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T08:21:48,182 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T08:21:48,185 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T08:21:48,188 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T08:21:48,198 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T08:21:48,199 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T08:21:48,214 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T08:21:48,236 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T08:21:48,248 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T08:21:48,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T08:21:48,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T08:21:48,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T08:21:48,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:48,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:48,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T08:21:48,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:48,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:48,269 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b33d9013c60a,38501,1732350105792, sessionid=0x1016812678f0000, setting cluster-up flag (Was=false) 2024-11-23T08:21:48,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:48,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:48,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:48,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
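The 'Set watcher on znode that does not yet exist' entries earlier and the NodeCreated events for /hbase/running above are the standard ZooKeeper idiom of arming a watch with exists() so the client is notified when the node appears. A minimal standalone sketch of that idiom (the quorum address is a placeholder; this is not HBase's ZKWatcher code):

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class RunningZnodeWatchSketch {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, // placeholder quorum address
            (WatchedEvent event) ->
                System.out.println("event: " + event.getType() + " on " + event.getPath()));
        // exists() arms a watch even when the znode is absent; a later NodeCreated
        // event (as seen above for /hbase/running) then fires the default watcher.
        Stat stat = zk.exists("/hbase/running", true);
        System.out.println(stat == null ? "not created yet, watch armed" : "already up");
    }
}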
2024-11-23T08:21:48,356 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T08:21:48,359 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b33d9013c60a,38501,1732350105792 2024-11-23T08:21:48,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:48,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:48,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:48,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:48,439 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T08:21:48,441 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b33d9013c60a,38501,1732350105792 2024-11-23T08:21:48,447 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T08:21:48,481 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-11-23T08:21:48,485 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:21:48,486 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-11-23T08:21:48,511 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(746): ClusterId : 7c4199b0-a333-4032-bd3c-7e578b25122d 2024-11-23T08:21:48,511 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(746): ClusterId : 7c4199b0-a333-4032-bd3c-7e578b25122d 2024-11-23T08:21:48,519 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T08:21:48,519 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(746): ClusterId : 7c4199b0-a333-4032-bd3c-7e578b25122d 2024-11-23T08:21:48,520 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T08:21:48,520 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T08:21:48,554 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T08:21:48,567 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T08:21:48,567 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T08:21:48,567 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T08:21:48,567 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T08:21:48,568 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T08:21:48,568 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T08:21:48,570 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T08:21:48,579 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
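The StochasticLoadBalancer line above lists the tunables it loaded (maxSteps, stepsPerRegion, maxRunningTime and the cost-function set). As a rough illustration, these typically correspond to hbase.master.balancer.stochastic.* configuration keys; the exact key names below are an assumption and should be checked against the HBase release in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names for the stochastic balancer tunables seen in the log.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1000000);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30000);
        System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
    }
}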
2024-11-23T08:21:48,587 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b33d9013c60a,38501,1732350105792 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T08:21:48,597 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T08:21:48,597 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T08:21:48,597 DEBUG [RS:2;b33d9013c60a:39649 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@979b8f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b33d9013c60a/172.17.0.2:0 2024-11-23T08:21:48,597 DEBUG [RS:0;b33d9013c60a:45199 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6707daa6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b33d9013c60a/172.17.0.2:0 2024-11-23T08:21:48,598 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T08:21:48,598 DEBUG [RS:1;b33d9013c60a:43417 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@134efedf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b33d9013c60a/172.17.0.2:0 2024-11-23T08:21:48,599 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b33d9013c60a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T08:21:48,599 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b33d9013c60a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T08:21:48,599 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b33d9013c60a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T08:21:48,599 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b33d9013c60a:0, corePoolSize=5, maxPoolSize=5 2024-11-23T08:21:48,599 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b33d9013c60a:0, corePoolSize=10, maxPoolSize=10 2024-11-23T08:21:48,600 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:48,600 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/b33d9013c60a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T08:21:48,600 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting 
executor service name=MASTER_TABLE_OPERATIONS-master/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:48,618 DEBUG [RS:2;b33d9013c60a:39649 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;b33d9013c60a:39649 2024-11-23T08:21:48,624 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T08:21:48,625 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T08:21:48,625 DEBUG [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-23T08:21:48,625 INFO [RS:2;b33d9013c60a:39649 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:21:48,626 DEBUG [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T08:21:48,628 DEBUG [RS:0;b33d9013c60a:45199 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b33d9013c60a:45199 2024-11-23T08:21:48,629 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T08:21:48,629 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T08:21:48,629 DEBUG [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-23T08:21:48,629 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(2659): reportForDuty to master=b33d9013c60a,38501,1732350105792 with port=39649, startcode=1732350107049 2024-11-23T08:21:48,629 INFO [RS:0;b33d9013c60a:45199 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:21:48,629 DEBUG [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T08:21:48,631 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(2659): reportForDuty to master=b33d9013c60a,38501,1732350105792 with port=45199, startcode=1732350106787 2024-11-23T08:21:48,636 DEBUG [RS:1;b33d9013c60a:43417 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b33d9013c60a:43417 2024-11-23T08:21:48,636 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T08:21:48,636 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T08:21:48,637 DEBUG [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-23T08:21:48,637 INFO [RS:1;b33d9013c60a:43417 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:21:48,637 DEBUG [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T08:21:48,639 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(2659): reportForDuty to master=b33d9013c60a,38501,1732350105792 with port=43417, startcode=1732350106962 2024-11-23T08:21:48,650 DEBUG [RS:2;b33d9013c60a:39649 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T08:21:48,652 DEBUG [RS:1;b33d9013c60a:43417 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T08:21:48,652 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T08:21:48,653 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T08:21:48,662 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:48,662 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T08:21:48,663 DEBUG [RS:0;b33d9013c60a:45199 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T08:21:48,685 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732350138685 2024-11-23T08:21:48,687 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T08:21:48,689 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T08:21:48,694 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T08:21:48,695 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T08:21:48,696 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T08:21:48,696 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T08:21:48,724 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44197, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T08:21:48,719 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:48,725 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58513, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T08:21:48,727 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49541, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T08:21:48,728 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T08:21:48,730 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T08:21:48,731 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T08:21:48,735 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-23T08:21:48,745 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-23T08:21:48,747 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-23T08:21:48,759 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T08:21:48,760 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T08:21:48,766 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b33d9013c60a:0:becomeActiveMaster-HFileCleaner.large.0-1732350108761,5,FailOnTimeoutGroup] 2024-11-23T08:21:48,776 DEBUG [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-23T08:21:48,776 DEBUG [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-23T08:21:48,776 WARN [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-23T08:21:48,776 WARN [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-23T08:21:48,777 DEBUG [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-23T08:21:48,777 WARN [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-23T08:21:48,781 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b33d9013c60a:0:becomeActiveMaster-HFileCleaner.small.0-1732350108767,5,FailOnTimeoutGroup] 2024-11-23T08:21:48,781 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-23T08:21:48,781 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T08:21:48,783 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:48,783 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:48,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741831_1007 (size=1321) 2024-11-23T08:21:48,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741831_1007 (size=1321) 2024-11-23T08:21:48,787 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T08:21:48,788 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:21:48,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741831_1007 (size=1321) 2024-11-23T08:21:48,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741832_1008 (size=32) 2024-11-23T08:21:48,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741832_1008 (size=32) 
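For readers who want to reproduce a column-family layout like the one the master logs here for hbase:meta, the following is a minimal Java sketch using the public HBase client API (TableDescriptorBuilder / ColumnFamilyDescriptorBuilder). It only mirrors the 'info' family attributes shown in the log (3 versions, ROWCOL bloom filter, ROW_INDEX_V1 encoding, in-memory, 8 KB blocks, no compression); the class name and table name are illustrative assumptions, and hbase:meta itself is created internally by InitMetaProcedure rather than by client code like this.

// Hedged sketch, not taken from the test itself.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the attributes logged for the 'info' family above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    // "meta_like_demo" is a placeholder table name for illustration only.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("meta_like_demo"))
        .setColumnFamily(info)
        .build();
  }
}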
2024-11-23T08:21:48,878 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(2659): reportForDuty to master=b33d9013c60a,38501,1732350105792 with port=43417, startcode=1732350106962 2024-11-23T08:21:48,878 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(2659): reportForDuty to master=b33d9013c60a,38501,1732350105792 with port=39649, startcode=1732350107049 2024-11-23T08:21:48,879 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:21:48,879 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(2659): reportForDuty to master=b33d9013c60a,38501,1732350105792 with port=45199, startcode=1732350106787 2024-11-23T08:21:48,880 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b33d9013c60a,43417,1732350106962 2024-11-23T08:21:48,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741832_1008 (size=32) 2024-11-23T08:21:48,883 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501 {}] master.ServerManager(517): Registering regionserver=b33d9013c60a,43417,1732350106962 2024-11-23T08:21:48,884 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T08:21:48,888 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T08:21:48,888 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:48,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T08:21:48,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T08:21:48,893 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T08:21:48,893 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:48,893 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b33d9013c60a,39649,1732350107049 2024-11-23T08:21:48,893 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501 {}] master.ServerManager(517): Registering regionserver=b33d9013c60a,39649,1732350107049 2024-11-23T08:21:48,894 DEBUG [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:21:48,894 DEBUG [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34105 2024-11-23T08:21:48,894 DEBUG [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T08:21:48,896 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T08:21:48,897 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T08:21:48,899 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b33d9013c60a,45199,1732350106787 2024-11-23T08:21:48,899 DEBUG [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:21:48,900 DEBUG [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34105 2024-11-23T08:21:48,900 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501 {}] master.ServerManager(517): Registering regionserver=b33d9013c60a,45199,1732350106787 2024-11-23T08:21:48,900 DEBUG [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T08:21:48,901 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T08:21:48,902 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:48,903 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T08:21:48,904 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T08:21:48,906 DEBUG [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:21:48,906 DEBUG [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34105 2024-11-23T08:21:48,906 DEBUG [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T08:21:48,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T08:21:48,908 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:48,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T08:21:48,910 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T08:21:48,912 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740 2024-11-23T08:21:48,913 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740 2024-11-23T08:21:48,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T08:21:48,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary 
data for 1588230740 2024-11-23T08:21:48,920 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T08:21:48,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T08:21:48,923 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T08:21:48,935 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:21:48,939 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71153991, jitterRate=0.06027708947658539}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T08:21:48,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732350108879Initializing all the Stores at 1732350108883 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732350108883Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732350108884 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350108884Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732350108884Cleaning up temporary data from old regions at 1732350108918 (+34 ms)Region opened successfully at 1732350108945 (+27 ms) 2024-11-23T08:21:48,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T08:21:48,946 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T08:21:48,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T08:21:48,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on 
hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T08:21:48,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T08:21:48,955 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T08:21:48,955 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732350108945Disabling compacts and flushes for region at 1732350108945Disabling writes for close at 1732350108946 (+1 ms)Writing region close event to WAL at 1732350108954 (+8 ms)Closed at 1732350108955 (+1 ms) 2024-11-23T08:21:48,960 DEBUG [RS:0;b33d9013c60a:45199 {}] zookeeper.ZKUtil(111): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b33d9013c60a,45199,1732350106787 2024-11-23T08:21:48,960 DEBUG [RS:2;b33d9013c60a:39649 {}] zookeeper.ZKUtil(111): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b33d9013c60a,39649,1732350107049 2024-11-23T08:21:48,960 WARN [RS:2;b33d9013c60a:39649 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T08:21:48,960 WARN [RS:0;b33d9013c60a:45199 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T08:21:48,960 INFO [RS:2;b33d9013c60a:39649 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T08:21:48,961 INFO [RS:0;b33d9013c60a:45199 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T08:21:48,961 DEBUG [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,39649,1732350107049 2024-11-23T08:21:48,961 DEBUG [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,45199,1732350106787 2024-11-23T08:21:48,962 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T08:21:48,962 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T08:21:48,964 DEBUG [RS:1;b33d9013c60a:43417 {}] zookeeper.ZKUtil(111): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b33d9013c60a,43417,1732350106962 2024-11-23T08:21:48,965 WARN [RS:1;b33d9013c60a:43417 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
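Each region server above instantiates org.apache.hadoop.hbase.wal.AsyncFSWALProvider for its WAL. As a hedged aside, the provider is normally selected through configuration rather than constructed directly; the sketch below assumes the commonly documented key hbase.wal.provider and the value asyncfs, which should be verified against the HBase version actually in use.

// Hedged sketch: selecting the WAL provider seen in this log via configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderConfigSketch {
  public static Configuration asyncWalConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "asyncfs");       // fan-out async WAL, as logged above
    // conf.set("hbase.wal.provider", "filesystem"); // classic FSHLog-based provider
    return conf;
  }
}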
2024-11-23T08:21:48,965 INFO [RS:1;b33d9013c60a:43417 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T08:21:48,965 DEBUG [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,43417,1732350106962 2024-11-23T08:21:48,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b33d9013c60a,43417,1732350106962] 2024-11-23T08:21:48,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b33d9013c60a,45199,1732350106787] 2024-11-23T08:21:48,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b33d9013c60a,39649,1732350107049] 2024-11-23T08:21:48,973 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T08:21:48,995 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T08:21:49,001 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T08:21:49,030 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T08:21:49,030 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T08:21:49,030 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T08:21:49,058 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T08:21:49,063 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T08:21:49,063 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T08:21:49,071 INFO [RS:1;b33d9013c60a:43417 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T08:21:49,071 INFO [RS:0;b33d9013c60a:45199 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T08:21:49,071 INFO [RS:2;b33d9013c60a:39649 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T08:21:49,071 
INFO [RS:0;b33d9013c60a:45199 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,071 INFO [RS:1;b33d9013c60a:43417 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,071 INFO [RS:2;b33d9013c60a:39649 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,073 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T08:21:49,075 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T08:21:49,075 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T08:21:49,080 INFO [RS:0;b33d9013c60a:45199 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T08:21:49,080 INFO [RS:1;b33d9013c60a:43417 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T08:21:49,081 INFO [RS:2;b33d9013c60a:39649 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T08:21:49,082 INFO [RS:0;b33d9013c60a:45199 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,082 INFO [RS:2;b33d9013c60a:39649 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,082 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,083 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,083 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,082 INFO [RS:1;b33d9013c60a:43417 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
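The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines come from ChoreService scheduling periodic chores on the master and region servers. The following is a minimal sketch of that pattern, assuming a made-up chore name and period; it illustrates the ScheduledChore/ChoreService API in general, not any specific chore from this test.

// Hedged sketch of the ScheduledChore/ChoreService pattern.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // "DemoChore" and the 60 s period are placeholders for illustration.
    ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 60_000) {
      @Override protected void chore() {
        // periodic work would go here
      }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(chore); // scheduling is what produces the "is enabled." log line
    service.shutdown();
  }
}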
2024-11-23T08:21:49,083 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,083 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,083 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,083 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,083 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b33d9013c60a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T08:21:49,083 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,083 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,083 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,083 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,083 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,083 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b33d9013c60a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T08:21:49,084 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,084 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,084 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,084 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,084 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,084 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,084 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service 
name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,084 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,084 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T08:21:49,084 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,084 DEBUG [RS:0;b33d9013c60a:45199 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b33d9013c60a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T08:21:49,084 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,084 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T08:21:49,085 DEBUG [RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,085 DEBUG [RS:1;b33d9013c60a:43417 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b33d9013c60a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T08:21:49,085 DEBUG [RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,085 DEBUG [RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,085 DEBUG [RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,085 DEBUG [RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,085 DEBUG [RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b33d9013c60a:0, corePoolSize=2, maxPoolSize=2 2024-11-23T08:21:49,085 DEBUG [RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,085 DEBUG [RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,086 DEBUG [RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,086 DEBUG [RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,086 DEBUG 
[RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,086 DEBUG [RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b33d9013c60a:0, corePoolSize=1, maxPoolSize=1 2024-11-23T08:21:49,086 DEBUG [RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T08:21:49,086 DEBUG [RS:2;b33d9013c60a:39649 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b33d9013c60a:0, corePoolSize=3, maxPoolSize=3 2024-11-23T08:21:49,115 INFO [RS:0;b33d9013c60a:45199 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,115 INFO [RS:0;b33d9013c60a:45199 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,115 INFO [RS:0;b33d9013c60a:45199 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,115 INFO [RS:0;b33d9013c60a:45199 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,115 INFO [RS:0;b33d9013c60a:45199 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,115 INFO [RS:1;b33d9013c60a:43417 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,116 INFO [RS:0;b33d9013c60a:45199 {}] hbase.ChoreService(168): Chore ScheduledChore name=b33d9013c60a,45199,1732350106787-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T08:21:49,116 INFO [RS:1;b33d9013c60a:43417 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,116 INFO [RS:1;b33d9013c60a:43417 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,116 INFO [RS:1;b33d9013c60a:43417 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,116 INFO [RS:1;b33d9013c60a:43417 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,116 INFO [RS:1;b33d9013c60a:43417 {}] hbase.ChoreService(168): Chore ScheduledChore name=b33d9013c60a,43417,1732350106962-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T08:21:49,129 INFO [RS:2;b33d9013c60a:39649 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,130 INFO [RS:2;b33d9013c60a:39649 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,130 INFO [RS:2;b33d9013c60a:39649 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-23T08:21:49,130 INFO [RS:2;b33d9013c60a:39649 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,130 INFO [RS:2;b33d9013c60a:39649 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,130 INFO [RS:2;b33d9013c60a:39649 {}] hbase.ChoreService(168): Chore ScheduledChore name=b33d9013c60a,39649,1732350107049-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T08:21:49,142 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T08:21:49,144 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T08:21:49,149 INFO [RS:0;b33d9013c60a:45199 {}] hbase.ChoreService(168): Chore ScheduledChore name=b33d9013c60a,45199,1732350106787-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,149 INFO [RS:1;b33d9013c60a:43417 {}] hbase.ChoreService(168): Chore ScheduledChore name=b33d9013c60a,43417,1732350106962-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,150 INFO [RS:1;b33d9013c60a:43417 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,150 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.Replication(171): b33d9013c60a,43417,1732350106962 started 2024-11-23T08:21:49,152 WARN [b33d9013c60a:38501 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-23T08:21:49,156 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T08:21:49,156 INFO [RS:2;b33d9013c60a:39649 {}] hbase.ChoreService(168): Chore ScheduledChore name=b33d9013c60a,39649,1732350107049-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,157 INFO [RS:2;b33d9013c60a:39649 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,157 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.Replication(171): b33d9013c60a,39649,1732350107049 started 2024-11-23T08:21:49,158 INFO [RS:0;b33d9013c60a:45199 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,158 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.Replication(171): b33d9013c60a,45199,1732350106787 started 2024-11-23T08:21:49,177 INFO [RS:1;b33d9013c60a:43417 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T08:21:49,178 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(1482): Serving as b33d9013c60a,43417,1732350106962, RpcServer on b33d9013c60a/172.17.0.2:43417, sessionid=0x1016812678f0002 2024-11-23T08:21:49,179 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T08:21:49,179 DEBUG [RS:1;b33d9013c60a:43417 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b33d9013c60a,43417,1732350106962 2024-11-23T08:21:49,180 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b33d9013c60a,43417,1732350106962' 2024-11-23T08:21:49,180 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T08:21:49,182 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T08:21:49,182 INFO [RS:2;b33d9013c60a:39649 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:49,182 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(1482): Serving as b33d9013c60a,39649,1732350107049, RpcServer on b33d9013c60a/172.17.0.2:39649, sessionid=0x1016812678f0003 2024-11-23T08:21:49,183 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T08:21:49,183 DEBUG [RS:2;b33d9013c60a:39649 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b33d9013c60a,39649,1732350107049 2024-11-23T08:21:49,183 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b33d9013c60a,39649,1732350107049' 2024-11-23T08:21:49,183 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T08:21:49,183 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T08:21:49,184 INFO [RS:0;b33d9013c60a:45199 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T08:21:49,184 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(1482): Serving as b33d9013c60a,45199,1732350106787, RpcServer on b33d9013c60a/172.17.0.2:45199, sessionid=0x1016812678f0001 2024-11-23T08:21:49,184 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T08:21:49,185 DEBUG [RS:0;b33d9013c60a:45199 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b33d9013c60a,45199,1732350106787 2024-11-23T08:21:49,185 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b33d9013c60a,45199,1732350106787' 2024-11-23T08:21:49,185 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T08:21:49,185 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T08:21:49,185 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T08:21:49,185 DEBUG [RS:1;b33d9013c60a:43417 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b33d9013c60a,43417,1732350106962 2024-11-23T08:21:49,185 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b33d9013c60a,43417,1732350106962' 2024-11-23T08:21:49,185 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T08:21:49,186 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T08:21:49,187 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T08:21:49,187 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T08:21:49,187 DEBUG [RS:2;b33d9013c60a:39649 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b33d9013c60a,39649,1732350107049 2024-11-23T08:21:49,187 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b33d9013c60a,39649,1732350107049' 2024-11-23T08:21:49,187 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T08:21:49,188 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T08:21:49,188 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T08:21:49,188 DEBUG [RS:0;b33d9013c60a:45199 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b33d9013c60a,45199,1732350106787 2024-11-23T08:21:49,188 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b33d9013c60a,45199,1732350106787' 2024-11-23T08:21:49,188 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T08:21:49,189 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 
2024-11-23T08:21:49,189 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T08:21:49,191 DEBUG [RS:0;b33d9013c60a:45199 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T08:21:49,191 INFO [RS:0;b33d9013c60a:45199 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T08:21:49,191 INFO [RS:0;b33d9013c60a:45199 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T08:21:49,192 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T08:21:49,192 DEBUG [RS:2;b33d9013c60a:39649 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T08:21:49,192 INFO [RS:2;b33d9013c60a:39649 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T08:21:49,192 INFO [RS:2;b33d9013c60a:39649 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T08:21:49,193 DEBUG [RS:1;b33d9013c60a:43417 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T08:21:49,193 INFO [RS:1;b33d9013c60a:43417 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T08:21:49,193 INFO [RS:1;b33d9013c60a:43417 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T08:21:49,296 INFO [RS:0;b33d9013c60a:45199 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T08:21:49,296 INFO [RS:2;b33d9013c60a:39649 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T08:21:49,296 INFO [RS:1;b33d9013c60a:43417 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T08:21:49,300 INFO [RS:0;b33d9013c60a:45199 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b33d9013c60a%2C45199%2C1732350106787, suffix=, logDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,45199,1732350106787, archiveDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/oldWALs, maxLogs=32 2024-11-23T08:21:49,300 INFO [RS:1;b33d9013c60a:43417 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b33d9013c60a%2C43417%2C1732350106962, suffix=, logDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,43417,1732350106962, archiveDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/oldWALs, maxLogs=32 2024-11-23T08:21:49,300 INFO [RS:2;b33d9013c60a:39649 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b33d9013c60a%2C39649%2C1732350107049, suffix=, logDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,39649,1732350107049, archiveDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/oldWALs, maxLogs=32 2024-11-23T08:21:49,317 DEBUG [RS:2;b33d9013c60a:39649 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,39649,1732350107049/b33d9013c60a%2C39649%2C1732350107049.1732350109305, exclude list is [], retry=0 2024-11-23T08:21:49,317 DEBUG [RS:1;b33d9013c60a:43417 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,43417,1732350106962/b33d9013c60a%2C43417%2C1732350106962.1732350109305, exclude list is [], retry=0 2024-11-23T08:21:49,321 DEBUG [RS:0;b33d9013c60a:45199 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,45199,1732350106787/b33d9013c60a%2C45199%2C1732350106787.1732350109305, exclude list is [], retry=0 2024-11-23T08:21:49,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42473,DS-ece71f5d-bd97-40ff-a109-3f66859df621,DISK] 2024-11-23T08:21:49,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41035,DS-68784e7f-be71-4c4d-94f4-e7d5c5342c58,DISK] 2024-11-23T08:21:49,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37117,DS-afc355f0-91cd-42de-9fa7-4bdaaae077e0,DISK] 2024-11-23T08:21:49,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37117,DS-afc355f0-91cd-42de-9fa7-4bdaaae077e0,DISK] 2024-11-23T08:21:49,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42473,DS-ece71f5d-bd97-40ff-a109-3f66859df621,DISK] 2024-11-23T08:21:49,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41035,DS-68784e7f-be71-4c4d-94f4-e7d5c5342c58,DISK] 2024-11-23T08:21:49,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37117,DS-afc355f0-91cd-42de-9fa7-4bdaaae077e0,DISK] 2024-11-23T08:21:49,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42473,DS-ece71f5d-bd97-40ff-a109-3f66859df621,DISK] 2024-11-23T08:21:49,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL 
client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41035,DS-68784e7f-be71-4c4d-94f4-e7d5c5342c58,DISK] 2024-11-23T08:21:49,359 INFO [RS:1;b33d9013c60a:43417 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,43417,1732350106962/b33d9013c60a%2C43417%2C1732350106962.1732350109305 2024-11-23T08:21:49,360 INFO [RS:0;b33d9013c60a:45199 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,45199,1732350106787/b33d9013c60a%2C45199%2C1732350106787.1732350109305 2024-11-23T08:21:49,364 INFO [RS:2;b33d9013c60a:39649 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,39649,1732350107049/b33d9013c60a%2C39649%2C1732350107049.1732350109305 2024-11-23T08:21:49,368 DEBUG [RS:1;b33d9013c60a:43417 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42885:42885),(127.0.0.1/127.0.0.1:42593:42593),(127.0.0.1/127.0.0.1:42215:42215)] 2024-11-23T08:21:49,368 DEBUG [RS:0;b33d9013c60a:45199 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42215:42215),(127.0.0.1/127.0.0.1:42885:42885),(127.0.0.1/127.0.0.1:42593:42593)] 2024-11-23T08:21:49,368 DEBUG [RS:2;b33d9013c60a:39649 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42215:42215),(127.0.0.1/127.0.0.1:42885:42885),(127.0.0.1/127.0.0.1:42593:42593)] 2024-11-23T08:21:49,404 DEBUG [b33d9013c60a:38501 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-23T08:21:49,413 DEBUG [b33d9013c60a:38501 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:21:49,423 DEBUG [b33d9013c60a:38501 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:21:49,423 DEBUG [b33d9013c60a:38501 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:21:49,423 DEBUG [b33d9013c60a:38501 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:21:49,424 DEBUG [b33d9013c60a:38501 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:21:49,424 DEBUG [b33d9013c60a:38501 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:21:49,424 DEBUG [b33d9013c60a:38501 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:21:49,424 INFO [b33d9013c60a:38501 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:21:49,424 INFO [b33d9013c60a:38501 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:21:49,424 INFO [b33d9013c60a:38501 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:21:49,424 DEBUG [b33d9013c60a:38501 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:21:49,432 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:21:49,439 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b33d9013c60a,43417,1732350106962, state=OPENING 2024-11-23T08:21:49,478 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create 
it 2024-11-23T08:21:49,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:49,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:49,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:49,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:49,490 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T08:21:49,490 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T08:21:49,490 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T08:21:49,490 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T08:21:49,492 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T08:21:49,495 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:21:49,667 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T08:21:49,669 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42387, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T08:21:49,682 INFO [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T08:21:49,682 INFO [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T08:21:49,683 INFO [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-23T08:21:49,686 INFO [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b33d9013c60a%2C43417%2C1732350106962.meta, suffix=.meta, logDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,43417,1732350106962, 
archiveDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/oldWALs, maxLogs=32 2024-11-23T08:21:49,702 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,43417,1732350106962/b33d9013c60a%2C43417%2C1732350106962.meta.1732350109688.meta, exclude list is [], retry=0 2024-11-23T08:21:49,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37117,DS-afc355f0-91cd-42de-9fa7-4bdaaae077e0,DISK] 2024-11-23T08:21:49,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41035,DS-68784e7f-be71-4c4d-94f4-e7d5c5342c58,DISK] 2024-11-23T08:21:49,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42473,DS-ece71f5d-bd97-40ff-a109-3f66859df621,DISK] 2024-11-23T08:21:49,711 INFO [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/WALs/b33d9013c60a,43417,1732350106962/b33d9013c60a%2C43417%2C1732350106962.meta.1732350109688.meta 2024-11-23T08:21:49,717 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42215:42215),(127.0.0.1/127.0.0.1:42593:42593),(127.0.0.1/127.0.0.1:42885:42885)] 2024-11-23T08:21:49,717 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T08:21:49,718 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-23T08:21:49,719 INFO [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:21:49,720 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T08:21:49,722 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T08:21:49,723 INFO [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
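The "WAL configuration" entry above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, provider AsyncFSWALProvider) is controlled by a handful of regionserver settings. A minimal Java sketch follows, assuming the commonly documented key names (hbase.wal.provider, hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs); the values mirror the log, but the key names are assumptions and were not read from this test's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "asyncfs");                              // AsyncFSWALProvider, as reported in the log
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);  // blocksize=256 MB (assumed key name)
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);           // rollsize = 0.5 * blocksize = 128 MB
        conf.setInt("hbase.regionserver.maxlogs", 32);                          // maxLogs=32
        System.out.println("WAL provider: " + conf.get("hbase.wal.provider"));
      }
    }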
2024-11-23T08:21:49,730 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T08:21:49,731 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:21:49,731 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T08:21:49,731 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T08:21:49,734 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T08:21:49,736 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T08:21:49,737 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:49,738 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T08:21:49,738 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T08:21:49,739 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T08:21:49,739 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:49,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T08:21:49,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T08:21:49,742 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T08:21:49,742 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:49,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T08:21:49,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T08:21:49,744 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T08:21:49,744 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:49,745 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
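The CompactionConfiguration entries above (minCompactSize:128 MB, minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, major jitter 0.5) correspond to the standard compaction tuning properties. A hedged sketch of setting the same values programmatically; the hbase.hstore.compaction.* and hbase.hregion.majorcompaction* key names are the usual ones and are assumed here, not taken from this run's site file:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                           // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);                          // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                    // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);            // off-peak ratio 5.000000
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);    // minCompactSize:128 MB
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);               // major period, 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);             // major jitter 0.500000
        System.out.println("ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", -1f));
      }
    }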
2024-11-23T08:21:49,745 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T08:21:49,747 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740 2024-11-23T08:21:49,750 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740 2024-11-23T08:21:49,752 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T08:21:49,752 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T08:21:49,754 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T08:21:49,757 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T08:21:49,759 INFO [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66609149, jitterRate=-0.0074463337659835815}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T08:21:49,760 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T08:21:49,764 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732350109732Writing region info on filesystem at 1732350109732Initializing all the Stores at 1732350109734 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732350109734Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732350109734Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350109734Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732350109734Cleaning up temporary data from old regions at 1732350109752 (+18 ms)Running coprocessor post-open hooks at 1732350109760 (+8 ms)Region opened successfully at 1732350109763 (+3 ms) 2024-11-23T08:21:49,772 INFO [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732350109659 2024-11-23T08:21:49,783 DEBUG [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T08:21:49,783 INFO [RS_OPEN_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T08:21:49,785 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:21:49,788 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b33d9013c60a,43417,1732350106962, state=OPEN 2024-11-23T08:21:49,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T08:21:49,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T08:21:49,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T08:21:49,827 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T08:21:49,827 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T08:21:49,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T08:21:49,827 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T08:21:49,828 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T08:21:49,829 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=b33d9013c60a,43417,1732350106962 2024-11-23T08:21:49,838 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T08:21:49,838 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b33d9013c60a,43417,1732350106962 in 335 msec 2024-11-23T08:21:49,848 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T08:21:49,849 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 866 msec 2024-11-23T08:21:49,850 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T08:21:49,851 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T08:21:49,870 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:21:49,872 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:21:49,895 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:21:49,897 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60701, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:21:49,958 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4620 sec 2024-11-23T08:21:49,958 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732350109958, completionTime=-1 2024-11-23T08:21:49,962 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-23T08:21:49,962 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
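The "Start fetching meta region location from registry" / "The fetched meta region location is [region=hbase:meta,,1.1588230740, ...]" exchange above is the lookup any client performs before its first data access. A minimal client-side sketch of the same lookup, assuming a reachable quorum (the 127.0.0.1:60357 quorum in this log is test-local and is used here only as a placeholder):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");   // placeholder; point at the cluster's real quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true); // force a fresh lookup
          System.out.println("hbase:meta is hosted on " + loc.getServerName());
        }
      }
    }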
2024-11-23T08:21:49,994 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-23T08:21:49,994 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732350169994 2024-11-23T08:21:49,995 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732350229995 2024-11-23T08:21:49,995 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 32 msec 2024-11-23T08:21:49,997 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:21:50,005 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b33d9013c60a,38501,1732350105792-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:50,005 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b33d9013c60a,38501,1732350105792-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:50,005 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b33d9013c60a,38501,1732350105792-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:50,006 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b33d9013c60a:38501, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:50,007 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:50,007 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:50,013 DEBUG [master/b33d9013c60a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T08:21:50,040 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.833sec 2024-11-23T08:21:50,042 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T08:21:50,044 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T08:21:50,045 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T08:21:50,046 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
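The chore registrations above (BalancerChore and RegionNormalizerChore at 300000 ms, CatalogJanitor at 300000 ms) and the "Quota support disabled" line are governed by master-side settings. A sketch with assumed key names (hbase.balancer.period, hbase.normalizer.period, hbase.catalogjanitor.interval, hbase.quota.enabled); treat the names as illustrative rather than verified against this build:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MasterChoreConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.balancer.period", 300000);          // BalancerChore period=300000 ms
        conf.setInt("hbase.normalizer.period", 300000);        // RegionNormalizerChore period=300000 ms
        conf.setInt("hbase.catalogjanitor.interval", 300000);  // CatalogJanitor period (assumed key name)
        conf.setBoolean("hbase.quota.enabled", true);          // this run keeps the default false, hence "Quota support disabled"
        System.out.println("balancer period = " + conf.getInt("hbase.balancer.period", -1) + " ms");
      }
    }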
2024-11-23T08:21:50,046 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T08:21:50,047 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b33d9013c60a,38501,1732350105792-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T08:21:50,048 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b33d9013c60a,38501,1732350105792-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T08:21:50,077 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T08:21:50,078 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is b33d9013c60a,38501,1732350105792 2024-11-23T08:21:50,081 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@42aee284 2024-11-23T08:21:50,083 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T08:21:50,086 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53965, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T08:21:50,094 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T08:21:50,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-11-23T08:21:50,110 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:21:50,111 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:50,112 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-11-23T08:21:50,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T08:21:50,125 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:21:50,156 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62913773, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:50,163 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-23T08:21:50,163 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-23T08:21:50,169 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:21:50,172 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:21:50,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741837_1013 (size=349) 2024-11-23T08:21:50,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741837_1013 (size=349) 2024-11-23T08:21:50,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741837_1013 (size=349) 2024-11-23T08:21:50,193 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:21:50,195 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5c2409a8a8e1fdedb01950f8285b9431, NAME => 'hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:21:50,196 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:21:50,196 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:21:50,197 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fd96b23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:50,197 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:21:50,200 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:21:50,203 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:21:50,205 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53182, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:21:50,208 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9d1e90a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:50,208 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:21:50,217 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:21:50,217 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:21:50,220 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33740, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:21:50,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741838_1014 (size=36) 2024-11-23T08:21:50,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741838_1014 (size=36) 2024-11-23T08:21:50,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b33d9013c60a,38501,1732350105792 2024-11-23T08:21:50,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-11-23T08:21:50,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/test.cache.data in system properties and HBase conf 2024-11-23T08:21:50,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T08:21:50,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir in system properties and HBase conf 2024-11-23T08:21:50,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T08:21:50,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T08:21:50,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T08:21:50,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 
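The "Minicluster is up" and "Starting mini mapreduce cluster..." lines come from the HBase testing utility that drives this run. A rough sketch of the same lifecycle, written against the released-line class name HBaseTestingUtility (the 4.0.0-alpha-1-SNAPSHOT in this log has renamed it HBaseTestingUtil); the table name and family are hypothetical smoke-test values:

    import org.apache.hadoop.hbase.HBaseTestingUtility; // HBaseTestingUtil in the 4.0 snapshot used by this run
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(3);                                      // 3 region servers, matching "expected min=3 server(s)"
        util.createTable(TableName.valueOf("smoke"), Bytes.toBytes("f")); // quick sanity check that the cluster serves requests
        // util.startMiniMapReduceCluster();                           // assumed helper behind "Starting mini mapreduce cluster"
        util.shutdownMiniCluster();
      }
    }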
2024-11-23T08:21:50,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T08:21:50,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T08:21:50,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T08:21:50,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T08:21:50,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T08:21:50,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T08:21:50,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T08:21:50,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T08:21:50,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/nfs.dump.dir in system properties and HBase conf 2024-11-23T08:21:50,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/java.io.tmpdir in system properties and HBase conf 2024-11-23T08:21:50,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741838_1014 (size=36) 2024-11-23T08:21:50,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T08:21:50,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T08:21:50,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T08:21:50,227 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:21:50,227 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 5c2409a8a8e1fdedb01950f8285b9431, disabling compactions & flushes 2024-11-23T08:21:50,227 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. 2024-11-23T08:21:50,227 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. 2024-11-23T08:21:50,227 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. after waiting 0 ms 2024-11-23T08:21:50,227 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. 2024-11-23T08:21:50,227 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. 2024-11-23T08:21:50,227 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5c2409a8a8e1fdedb01950f8285b9431: Waiting for close lock at 1732350110227Disabling compacts and flushes for region at 1732350110227Disabling writes for close at 1732350110227Writing region close event to WAL at 1732350110227Closed at 1732350110227 2024-11-23T08:21:50,230 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:21:50,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T08:21:50,238 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1732350110231"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350110231"}]},"ts":"1732350110231"} 2024-11-23T08:21:50,246 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
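The pid=4 CreateTableProcedure above was triggered by a create request for 'hbase:acl' with a single 'l' family (VERSIONS => '1', IN_MEMORY => 'true', BLOOMFILTER => 'NONE', BLOCKSIZE => 8192 B). An equivalent table can be created from the client API; the sketch below uses a hypothetical table name acl_like in the default namespace, since hbase:acl itself is a system table that the AccessController manages:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAclLikeTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(TableName.valueOf("acl_like"));
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("l"))
              .setMaxVersions(1)                  // VERSIONS => '1'
              .setInMemory(true)                  // IN_MEMORY => 'true'
              .setBloomFilterType(BloomType.NONE) // BLOOMFILTER => 'NONE'
              .setBlocksize(8192)                 // BLOCKSIZE => '8192 B (8KB)'
              .build());
          admin.createTable(table.build());       // drives the same CreateTableProcedure state machine seen above
        }
      }
    }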
2024-11-23T08:21:50,251 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:21:50,254 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350110251"}]},"ts":"1732350110251"} 2024-11-23T08:21:50,261 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-11-23T08:21:50,262 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:21:50,263 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:21:50,264 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:21:50,264 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:21:50,264 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:21:50,264 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:21:50,264 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:21:50,264 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:21:50,264 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:21:50,264 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:21:50,264 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:21:50,267 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=5c2409a8a8e1fdedb01950f8285b9431, ASSIGN}] 2024-11-23T08:21:50,276 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=5c2409a8a8e1fdedb01950f8285b9431, ASSIGN 2024-11-23T08:21:50,279 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=5c2409a8a8e1fdedb01950f8285b9431, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 2024-11-23T08:21:50,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741839_1015 (size=592039) 2024-11-23T08:21:50,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741839_1015 (size=592039) 2024-11-23T08:21:50,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741839_1015 (size=592039) 2024-11-23T08:21:50,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741840_1016 (size=1663647) 2024-11-23T08:21:50,428 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741840_1016 (size=1663647) 2024-11-23T08:21:50,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741840_1016 (size=1663647) 2024-11-23T08:21:50,433 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-23T08:21:50,451 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5c2409a8a8e1fdedb01950f8285b9431, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:21:50,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T08:21:50,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=5c2409a8a8e1fdedb01950f8285b9431, ASSIGN because future has completed 2024-11-23T08:21:50,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c2409a8a8e1fdedb01950f8285b9431, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:21:50,643 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T08:21:50,661 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36249, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T08:21:50,671 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. 2024-11-23T08:21:50,671 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5c2409a8a8e1fdedb01950f8285b9431, NAME => 'hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431.', STARTKEY => '', ENDKEY => ''} 2024-11-23T08:21:50,672 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. service=AccessControlService 2024-11-23T08:21:50,672 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
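The AccessControlService registration and the "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" lines indicate the access-control coprocessor is installed on this cluster. A hedged configuration sketch; the hbase.coprocessor.*.classes and hbase.security.authorization keys are the commonly documented way to enable it and are assumptions here, not extracted from this test's site configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControllerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        String ac = "org.apache.hadoop.hbase.security.access.AccessController"; // same class the log reports loading
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes", ac);
        conf.set("hbase.coprocessor.region.classes", ac);
        conf.set("hbase.coprocessor.regionserver.classes", ac);
        System.out.println("region coprocessors: " + conf.get("hbase.coprocessor.region.classes"));
      }
    }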
2024-11-23T08:21:50,672 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 5c2409a8a8e1fdedb01950f8285b9431 2024-11-23T08:21:50,672 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:21:50,673 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5c2409a8a8e1fdedb01950f8285b9431 2024-11-23T08:21:50,673 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5c2409a8a8e1fdedb01950f8285b9431 2024-11-23T08:21:50,678 INFO [StoreOpener-5c2409a8a8e1fdedb01950f8285b9431-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 5c2409a8a8e1fdedb01950f8285b9431 2024-11-23T08:21:50,681 INFO [StoreOpener-5c2409a8a8e1fdedb01950f8285b9431-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c2409a8a8e1fdedb01950f8285b9431 columnFamilyName l 2024-11-23T08:21:50,681 DEBUG [StoreOpener-5c2409a8a8e1fdedb01950f8285b9431-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:50,683 INFO [StoreOpener-5c2409a8a8e1fdedb01950f8285b9431-1 {}] regionserver.HStore(327): Store=5c2409a8a8e1fdedb01950f8285b9431/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:21:50,684 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5c2409a8a8e1fdedb01950f8285b9431 2024-11-23T08:21:50,686 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/acl/5c2409a8a8e1fdedb01950f8285b9431 2024-11-23T08:21:50,692 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/acl/5c2409a8a8e1fdedb01950f8285b9431 2024-11-23T08:21:50,693 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5c2409a8a8e1fdedb01950f8285b9431 2024-11-23T08:21:50,693 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5c2409a8a8e1fdedb01950f8285b9431 2024-11-23T08:21:50,701 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5c2409a8a8e1fdedb01950f8285b9431 2024-11-23T08:21:50,717 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/acl/5c2409a8a8e1fdedb01950f8285b9431/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:21:50,718 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 5c2409a8a8e1fdedb01950f8285b9431; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70221283, jitterRate=0.04637865722179413}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:21:50,718 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5c2409a8a8e1fdedb01950f8285b9431 2024-11-23T08:21:50,722 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5c2409a8a8e1fdedb01950f8285b9431: Running coprocessor pre-open hook at 1732350110674Writing region info on filesystem at 1732350110675 (+1 ms)Initializing all the Stores at 1732350110677 (+2 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732350110677Cleaning up temporary data from old regions at 1732350110693 (+16 ms)Running coprocessor post-open hooks at 1732350110719 (+26 ms)Region opened successfully at 1732350110722 (+3 ms) 2024-11-23T08:21:50,729 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., pid=6, masterSystemTime=1732350110642 2024-11-23T08:21:50,737 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5c2409a8a8e1fdedb01950f8285b9431, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:21:50,741 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. 
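Once pid=6 completes, the hbase:acl region 5c2409a8a8e1fdedb01950f8285b9431 is open on b33d9013c60a,39649. A small client-side check of that state, assuming a connection to the running cluster is available:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class AclRegionCheckSketch {
      public static void main(String[] args) throws Exception {
        TableName acl = TableName.valueOf("hbase", "acl");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          System.out.println("hbase:acl enabled: " + admin.isTableEnabled(acl));
          for (RegionInfo region : admin.getRegions(acl)) {
            System.out.println("region " + region.getEncodedName()); // e.g. 5c2409a8a8e1fdedb01950f8285b9431 in this run
          }
        }
      }
    }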
2024-11-23T08:21:50,741 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. 2024-11-23T08:21:50,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c2409a8a8e1fdedb01950f8285b9431, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:21:50,759 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T08:21:50,759 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5c2409a8a8e1fdedb01950f8285b9431, server=b33d9013c60a,39649,1732350107049 in 286 msec 2024-11-23T08:21:50,767 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T08:21:50,767 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=5c2409a8a8e1fdedb01950f8285b9431, ASSIGN in 492 msec 2024-11-23T08:21:50,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T08:21:50,770 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:21:50,770 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350110770"}]},"ts":"1732350110770"} 2024-11-23T08:21:50,774 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-11-23T08:21:50,777 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:21:50,781 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 678 msec 2024-11-23T08:21:51,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T08:21:51,279 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-11-23T08:21:51,295 DEBUG [master/b33d9013c60a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T08:21:51,297 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T08:21:51,298 INFO [master/b33d9013c60a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b33d9013c60a,38501,1732350105792-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T08:21:52,276 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T08:21:52,385 WARN [Thread-384 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T08:21:52,652 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T08:21:52,653 WARN [Thread-384 {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-23T08:21:52,654 INFO [Thread-384 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T08:21:52,668 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T08:21:52,668 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T08:21:52,668 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T08:21:52,670 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T08:21:52,673 INFO [Thread-384 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T08:21:52,674 INFO [Thread-384 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T08:21:52,674 INFO [Thread-384 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T08:21:52,676 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17a93e7b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,AVAILABLE} 2024-11-23T08:21:52,677 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@339f0d8f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-23T08:21:52,681 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddf8f6d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,AVAILABLE} 2024-11-23T08:21:52,682 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32fc7a65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-23T08:21:52,850 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices as a root resource class 2024-11-23T08:21:52,850 INFO [Thread-384 {}] 
container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver as a provider class 2024-11-23T08:21:52,850 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-23T08:21:52,852 INFO [Thread-384 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-23T08:21:52,914 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T08:21:53,124 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T08:21:53,591 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices to GuiceManagedComponentProvider with the scope "PerRequest" 2024-11-23T08:21:53,619 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39ead806{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/java.io.tmpdir/jetty-localhost-38091-hadoop-yarn-common-3_4_1_jar-_-any-2058405672615334143/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-23T08:21:53,620 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e976c57{HTTP/1.1, (http/1.1)}{localhost:38091} 2024-11-23T08:21:53,620 INFO [Time-limited test {}] server.Server(415): Started @17190ms 2024-11-23T08:21:53,624 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d640796{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/java.io.tmpdir/jetty-localhost-36995-hadoop-yarn-common-3_4_1_jar-_-any-6696098952183319052/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-23T08:21:53,625 INFO [Thread-384 {}] server.AbstractConnector(333): Started ServerConnector@734bdfe6{HTTP/1.1, (http/1.1)}{localhost:36995} 2024-11-23T08:21:53,625 INFO [Thread-384 {}] server.Server(415): Started @17194ms 2024-11-23T08:21:53,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741841_1017 (size=5) 2024-11-23T08:21:53,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741841_1017 (size=5) 2024-11-23T08:21:53,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741841_1017 (size=5) 2024-11-23T08:21:54,651 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 
2024-11-23T08:21:54,658 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T08:21:54,725 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-23T08:21:54,727 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T08:21:54,765 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T08:21:54,765 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T08:21:54,765 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T08:21:54,767 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T08:21:54,771 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@543a633d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,AVAILABLE} 2024-11-23T08:21:54,771 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@573f63f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-23T08:21:54,830 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-23T08:21:54,830 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-23T08:21:54,830 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-23T08:21:54,830 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-23T08:21:54,843 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T08:21:54,869 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T08:21:55,059 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T08:21:55,079 INFO [Time-limited test 
{}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@261d7d33{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/java.io.tmpdir/jetty-localhost-46755-hadoop-yarn-common-3_4_1_jar-_-any-13552320018032609575/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-23T08:21:55,080 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58ab1eff{HTTP/1.1, (http/1.1)}{localhost:46755} 2024-11-23T08:21:55,080 INFO [Time-limited test {}] server.Server(415): Started @18650ms 2024-11-23T08:21:55,250 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-23T08:21:55,253 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T08:21:55,260 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:21:55,277 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-23T08:21:55,277 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T08:21:55,301 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T08:21:55,301 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T08:21:55,302 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T08:21:55,303 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T08:21:55,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6632de68{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,AVAILABLE} 2024-11-23T08:21:55,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@dbc49a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-23T08:21:55,366 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-23T08:21:55,366 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-23T08:21:55,367 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-23T08:21:55,367 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-23T08:21:55,369 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T08:21:55,371 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-23T08:21:55,381 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T08:21:55,389 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T08:21:55,544 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T08:21:55,550 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43c039d9{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/java.io.tmpdir/jetty-localhost-44979-hadoop-yarn-common-3_4_1_jar-_-any-8563602012763072067/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-23T08:21:55,551 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d38c72c{HTTP/1.1, (http/1.1)}{localhost:44979} 2024-11-23T08:21:55,551 INFO [Time-limited test {}] server.Server(415): Started @19120ms 2024-11-23T08:21:55,616 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-11-23T08:21:55,618 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:21:55,688 INFO [Time-limited test {}] 
hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=719, OpenFileDescriptor=782, MaxFileDescriptor=1048576, SystemLoadAverage=263, ProcessCount=11, AvailableMemoryMB=6386 2024-11-23T08:21:55,694 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=719 is superior to 500 2024-11-23T08:21:55,701 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T08:21:55,708 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is b33d9013c60a,38501,1732350105792 2024-11-23T08:21:55,708 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1a9a4c16 2024-11-23T08:21:55,708 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T08:21:55,714 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53748, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T08:21:55,716 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:21:55,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:55,721 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:21:55,723 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:55,726 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-11-23T08:21:55,727 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:21:55,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T08:21:55,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741842_1018 (size=422) 2024-11-23T08:21:55,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is 
added to blk_1073741842_1018 (size=422) 2024-11-23T08:21:55,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741842_1018 (size=422) 2024-11-23T08:21:55,796 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ef5793d8f5c1e43ccf012959a4988eca, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:21:55,799 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => de46fac4fb8b590f1d6ffea30a7de582, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:21:55,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741844_1020 (size=83) 2024-11-23T08:21:55,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741844_1020 (size=83) 2024-11-23T08:21:55,830 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:21:55,831 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing de46fac4fb8b590f1d6ffea30a7de582, disabling compactions & flushes 2024-11-23T08:21:55,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741843_1019 (size=83) 2024-11-23T08:21:55,831 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 
2024-11-23T08:21:55,831 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 2024-11-23T08:21:55,831 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. after waiting 0 ms 2024-11-23T08:21:55,831 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 2024-11-23T08:21:55,831 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 2024-11-23T08:21:55,831 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for de46fac4fb8b590f1d6ffea30a7de582: Waiting for close lock at 1732350115830Disabling compacts and flushes for region at 1732350115831 (+1 ms)Disabling writes for close at 1732350115831Writing region close event to WAL at 1732350115831Closed at 1732350115831 2024-11-23T08:21:55,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741844_1020 (size=83) 2024-11-23T08:21:55,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741843_1019 (size=83) 2024-11-23T08:21:55,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741843_1019 (size=83) 2024-11-23T08:21:55,833 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:21:55,834 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing ef5793d8f5c1e43ccf012959a4988eca, disabling compactions & flushes 2024-11-23T08:21:55,834 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:21:55,834 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:21:55,834 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 
after waiting 0 ms 2024-11-23T08:21:55,834 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:21:55,834 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:21:55,834 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for ef5793d8f5c1e43ccf012959a4988eca: Waiting for close lock at 1732350115834Disabling compacts and flushes for region at 1732350115834Disabling writes for close at 1732350115834Writing region close event to WAL at 1732350115834Closed at 1732350115834 2024-11-23T08:21:55,836 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:21:55,837 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732350115837"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350115837"}]},"ts":"1732350115837"} 2024-11-23T08:21:55,837 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732350115837"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350115837"}]},"ts":"1732350115837"} 2024-11-23T08:21:55,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T08:21:55,874 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
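
The CreateTableProcedure above lays out testtb-testExportFileSystemStateWithSplitRegion on the filesystem and adds its two regions to hbase:meta, split at row key '1', each with the single column family 'cf'. Purely as an assumed illustration of the equivalent client-side request (the test drives this through its own utilities, not this exact code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePreSplitTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build();
          // A single split point at '1' yields the two regions seen in the log:
          // ('', '1') and ('1', '').
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(desc, splitKeys);
        }
      }
    }
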
2024-11-23T08:21:55,877 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:21:55,878 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350115878"}]},"ts":"1732350115878"} 2024-11-23T08:21:55,881 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-11-23T08:21:55,882 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:21:55,886 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:21:55,887 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:21:55,887 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:21:55,887 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:21:55,887 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:21:55,887 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:21:55,887 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:21:55,887 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:21:55,887 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:21:55,887 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:21:55,888 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ef5793d8f5c1e43ccf012959a4988eca, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=de46fac4fb8b590f1d6ffea30a7de582, ASSIGN}] 2024-11-23T08:21:55,892 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ef5793d8f5c1e43ccf012959a4988eca, ASSIGN 2024-11-23T08:21:55,893 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=de46fac4fb8b590f1d6ffea30a7de582, ASSIGN 2024-11-23T08:21:55,895 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=de46fac4fb8b590f1d6ffea30a7de582, ASSIGN; state=OFFLINE, location=b33d9013c60a,45199,1732350106787; forceNewPlan=false, retain=false 2024-11-23T08:21:55,895 
INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ef5793d8f5c1e43ccf012959a4988eca, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 2024-11-23T08:21:56,046 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-23T08:21:56,047 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=de46fac4fb8b590f1d6ffea30a7de582, regionState=OPENING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:21:56,047 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=ef5793d8f5c1e43ccf012959a4988eca, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:21:56,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T08:21:56,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ef5793d8f5c1e43ccf012959a4988eca, ASSIGN because future has completed 2024-11-23T08:21:56,055 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef5793d8f5c1e43ccf012959a4988eca, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:21:56,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=de46fac4fb8b590f1d6ffea30a7de582, ASSIGN because future has completed 2024-11-23T08:21:56,063 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure de46fac4fb8b590f1d6ffea30a7de582, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:21:56,229 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T08:21:56,233 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:21:56,234 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => ef5793d8f5c1e43ccf012959a4988eca, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T08:21:56,234 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 
service=AccessControlService 2024-11-23T08:21:56,235 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:21:56,235 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:56,235 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:21:56,235 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:56,236 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:56,240 INFO [StoreOpener-ef5793d8f5c1e43ccf012959a4988eca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:56,248 INFO [StoreOpener-ef5793d8f5c1e43ccf012959a4988eca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef5793d8f5c1e43ccf012959a4988eca columnFamilyName cf 2024-11-23T08:21:56,248 DEBUG [StoreOpener-ef5793d8f5c1e43ccf012959a4988eca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:56,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:21:56,248 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-11-23T08:21:56,249 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T08:21:56,249 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T08:21:56,249 INFO [StoreOpener-ef5793d8f5c1e43ccf012959a4988eca-1 {}] regionserver.HStore(327): Store=ef5793d8f5c1e43ccf012959a4988eca/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:21:56,251 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-23T08:21:56,251 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-11-23T08:21:56,252 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:21:56,252 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-11-23T08:21:56,253 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-23T08:21:56,253 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-11-23T08:21:56,253 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:21:56,253 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-11-23T08:21:56,253 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T08:21:56,253 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T08:21:56,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T08:21:56,254 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-23T08:21:56,255 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:56,257 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:56,258 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:56,259 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:56,259 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:56,264 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:56,264 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45861, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T08:21:56,278 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:21:56,279 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened ef5793d8f5c1e43ccf012959a4988eca; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60101709, jitterRate=-0.10441474616527557}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:21:56,279 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:56,282 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for ef5793d8f5c1e43ccf012959a4988eca: Running coprocessor pre-open hook at 1732350116236Writing region info on filesystem at 1732350116236Initializing all the Stores at 1732350116239 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350116239Cleaning up temporary data from old regions at 1732350116259 (+20 ms)Running coprocessor post-open hooks at 1732350116279 (+20 ms)Region opened successfully at 1732350116282 (+3 ms) 2024-11-23T08:21:56,283 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 
2024-11-23T08:21:56,283 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => de46fac4fb8b590f1d6ffea30a7de582, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T08:21:56,284 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. service=AccessControlService 2024-11-23T08:21:56,284 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:21:56,284 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:56,284 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:21:56,285 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:56,285 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:56,287 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca., pid=10, masterSystemTime=1732350116222 2024-11-23T08:21:56,297 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:21:56,297 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 
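
At this point the first region ef5793d8f5c1e43ccf012959a4988eca (key range '' to '1') is open on b33d9013c60a,39649, and the entries that follow open its sibling de46fac4fb8b590f1d6ffea30a7de582 ('1' to '') on b33d9013c60a,45199. As a hedged sketch of how a client write maps onto that split layout (the row keys and values below are invented for illustration and do not appear in the log):

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutIntoSplitTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn)) {
          // "0-row" sorts before the split key '1', so it lands in the first region;
          // "1-row" sorts at or after '1', so it lands in the second region.
          Put first = new Put(Bytes.toBytes("0-row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v0"));
          Put second = new Put(Bytes.toBytes("1-row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1"));
          table.put(Arrays.asList(first, second));
        }
      }
    }
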
2024-11-23T08:21:56,297 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=ef5793d8f5c1e43ccf012959a4988eca, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:21:56,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef5793d8f5c1e43ccf012959a4988eca, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:21:56,306 INFO [StoreOpener-de46fac4fb8b590f1d6ffea30a7de582-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:56,313 INFO [StoreOpener-de46fac4fb8b590f1d6ffea30a7de582-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region de46fac4fb8b590f1d6ffea30a7de582 columnFamilyName cf 2024-11-23T08:21:56,313 DEBUG [StoreOpener-de46fac4fb8b590f1d6ffea30a7de582-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:56,313 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=b33d9013c60a,39649,1732350107049, table=testtb-testExportFileSystemStateWithSplitRegion, region=ef5793d8f5c1e43ccf012959a4988eca. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-23T08:21:56,316 INFO [StoreOpener-de46fac4fb8b590f1d6ffea30a7de582-1 {}] regionserver.HStore(327): Store=de46fac4fb8b590f1d6ffea30a7de582/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:21:56,316 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:56,318 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:56,319 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:56,321 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:56,321 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:56,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=8 2024-11-23T08:21:56,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure ef5793d8f5c1e43ccf012959a4988eca, server=b33d9013c60a,39649,1732350107049 in 258 msec 2024-11-23T08:21:56,325 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ef5793d8f5c1e43ccf012959a4988eca, ASSIGN in 433 msec 2024-11-23T08:21:56,325 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:56,331 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:21:56,333 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened de46fac4fb8b590f1d6ffea30a7de582; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60572829, jitterRate=-0.09739451110363007}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:21:56,333 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:56,333 DEBUG 
[RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for de46fac4fb8b590f1d6ffea30a7de582: Running coprocessor pre-open hook at 1732350116285Writing region info on filesystem at 1732350116285Initializing all the Stores at 1732350116288 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350116289 (+1 ms)Cleaning up temporary data from old regions at 1732350116321 (+32 ms)Running coprocessor post-open hooks at 1732350116333 (+12 ms)Region opened successfully at 1732350116333 2024-11-23T08:21:56,338 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582., pid=11, masterSystemTime=1732350116228 2024-11-23T08:21:56,342 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 2024-11-23T08:21:56,342 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 2024-11-23T08:21:56,344 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=de46fac4fb8b590f1d6ffea30a7de582, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:21:56,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure de46fac4fb8b590f1d6ffea30a7de582, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:21:56,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=9 2024-11-23T08:21:56,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure de46fac4fb8b590f1d6ffea30a7de582, server=b33d9013c60a,45199,1732350106787 in 288 msec 2024-11-23T08:21:56,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T08:21:56,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-23T08:21:56,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=de46fac4fb8b590f1d6ffea30a7de582, ASSIGN in 468 msec 2024-11-23T08:21:56,366 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:21:56,366 DEBUG [PEWorker-5 {}] 
hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350116366"}]},"ts":"1732350116366"} 2024-11-23T08:21:56,371 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-11-23T08:21:56,375 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:21:56,379 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-11-23T08:21:56,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:21:56,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:21:56,395 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44599, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:21:56,402 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:21:56,402 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:21:56,402 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:21:56,422 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40221, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-11-23T08:21:56,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:21:56,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:21:56,429 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39025, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-11-23T08:21:56,432 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T08:21:56,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-23T08:21:56,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-23T08:21:56,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-23T08:21:56,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:56,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:56,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:56,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-23T08:21:56,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:21:56,539 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:21:56,539 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:21:56,540 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:21:56,540 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:21:56,546 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 824 msec 2024-11-23T08:21:56,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=7 2024-11-23T08:21:56,868 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T08:21:56,868 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSplitRegion get assigned. Timeout = 60000ms 2024-11-23T08:21:56,869 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:21:56,876 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned to meta. Checking AM states. 2024-11-23T08:21:56,877 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:21:56,877 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned. 2024-11-23T08:21:56,881 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T08:21:56,899 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T08:21:56,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350116899 (current time:1732350116899). 
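The snapshot request logged just above ({ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }) is what the Admin API produces on the client side. A hedged sketch of that call (connection setup assumed; the two-argument snapshot() overload takes a FLUSH-type snapshot of an enabled table and blocks until the master-side procedure finishes):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure (pid=12 in this log) completes.
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSplitRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"));
        }
      }
    }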
2024-11-23T08:21:56,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:21:56,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-23T08:21:56,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:21:56,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cf473e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:56,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:21:56,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:21:56,906 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:21:56,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:21:56,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:21:56,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3706ed94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:56,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:21:56,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:21:56,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:21:56,909 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53764, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:21:56,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41ad230e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:56,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:21:56,914 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:21:56,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:21:56,916 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53586, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:21:56,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 2024-11-23T08:21:56,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:21:56,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:21:56,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:21:56,929 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
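The DEBUG "Call stack" above is not an error: it records where the short-lived connection opened by SnapshotDescriptionUtils.isSecurityAvailable was closed during snapshot validation. That check essentially probes whether the hbase:acl table exists; a rough client-side equivalent (a sketch, with configuration assumed) looks like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CheckAclTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // hbase:acl only exists when the AccessController coprocessor is enabled,
          // which is why the snapshot code probes for it before copying ACLs.
          boolean aclTableExists = admin.tableExists(TableName.valueOf("hbase:acl"));
          System.out.println("hbase:acl present: " + aclTableExists);
        }
      }
    }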
2024-11-23T08:21:56,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@730bee88, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:56,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:21:56,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:21:56,933 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:21:56,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:21:56,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:21:56,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a4d4951, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:56,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:21:56,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:21:56,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:21:56,937 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53776, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:21:56,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49bce706, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:56,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:21:56,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:21:56,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:21:56,944 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53596, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
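The connection being set up here is used to read the table's ACL for the snapshot description, as the PermissionStorage.getTablePermissions stack trace and the "Read acl: ... jenkins: RWXCA" entries that follow show. From a client, the same permissions can be fetched with AccessControlClient; a hedged sketch (note that getUserPermissions is declared to throw Throwable):

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowTablePermissions {
      // AccessControlClient.getUserPermissions throws Throwable, propagated here for brevity.
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          List<UserPermission> perms = AccessControlClient.getUserPermissions(
              conn, "testtb-testExportFileSystemStateWithSplitRegion");
          // Expected to include the grant recorded in this log: jenkins -> RWXCA
          perms.forEach(System.out::println);
        }
      }
    }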
2024-11-23T08:21:56,947 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:21:56,948 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:21:56,950 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39470, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:21:56,953 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 2024-11-23T08:21:56,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:21:56,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:21:56,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:21:56,953 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:21:56,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T08:21:56,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:21:56,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T08:21:56,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-23T08:21:56,970 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:21:56,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T08:21:56,976 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:21:56,989 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:21:57,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741845_1021 (size=215) 2024-11-23T08:21:57,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741845_1021 (size=215) 2024-11-23T08:21:57,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741845_1021 (size=215) 2024-11-23T08:21:57,010 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:21:57,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef5793d8f5c1e43ccf012959a4988eca}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de46fac4fb8b590f1d6ffea30a7de582}] 2024-11-23T08:21:57,018 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:57,018 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:57,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T08:21:57,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45199 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-11-23T08:21:57,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-11-23T08:21:57,177 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:21:57,177 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 2024-11-23T08:21:57,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for ef5793d8f5c1e43ccf012959a4988eca: 2024-11-23T08:21:57,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for de46fac4fb8b590f1d6ffea30a7de582: 2024-11-23T08:21:57,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-23T08:21:57,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 
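While the SnapshotRegionProcedure subprocedures (pid=13 and pid=14) run on the region servers, the caller keeps polling the master ("Checking to see if procedure is done pid=12"). If the snapshot had been requested asynchronously, the client-side poll would look roughly like the sketch below; the SnapshotDescription construction and the sleep interval are assumptions, not taken from the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class WaitForSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        SnapshotDescription desc = new SnapshotDescription(
            "emptySnaptb0-testExportFileSystemStateWithSplitRegion",
            TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"));
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Poll until the master reports the snapshot procedure as finished.
          while (!admin.isSnapshotFinished(desc)) {
            Thread.sleep(200);
          }
          System.out.println("snapshot finished");
        }
      }
    }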
2024-11-23T08:21:57,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:57,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:57,188 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:21:57,188 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:21:57,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:21:57,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:21:57,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741847_1023 (size=86) 2024-11-23T08:21:57,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741847_1023 (size=86) 2024-11-23T08:21:57,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741846_1022 (size=86) 2024-11-23T08:21:57,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:21:57,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741846_1022 (size=86) 2024-11-23T08:21:57,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741847_1023 (size=86) 2024-11-23T08:21:57,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741846_1022 (size=86) 2024-11-23T08:21:57,220 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 
2024-11-23T08:21:57,222 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-23T08:21:57,222 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-23T08:21:57,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-23T08:21:57,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-23T08:21:57,225 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:57,225 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:57,225 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:57,225 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:57,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure de46fac4fb8b590f1d6ffea30a7de582 in 214 msec 2024-11-23T08:21:57,232 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-23T08:21:57,232 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:21:57,233 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ef5793d8f5c1e43ccf012959a4988eca in 214 msec 2024-11-23T08:21:57,235 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:21:57,238 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:21:57,239 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 
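After consolidation, the entries that follow move the snapshot out of .hbase-snapshot/.tmp and finish pid=12, at which point it becomes visible to clients. A minimal listing sketch (configuration assumed):

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshots {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          List<SnapshotDescription> snapshots = admin.listSnapshots();
          for (SnapshotDescription s : snapshots) {
            // Should include emptySnaptb0-testExportFileSystemStateWithSplitRegion once pid=12 completes.
            System.out.println(s.getName() + " (table=" + s.getTableName() + ")");
          }
        }
      }
    }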
2024-11-23T08:21:57,242 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:57,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741848_1024 (size=597) 2024-11-23T08:21:57,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741848_1024 (size=597) 2024-11-23T08:21:57,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741848_1024 (size=597) 2024-11-23T08:21:57,269 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:21:57,284 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:21:57,285 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:57,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T08:21:57,288 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:21:57,288 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-23T08:21:57,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 324 msec 2024-11-23T08:21:57,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T08:21:57,599 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: 
default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T08:21:57,614 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='0473e980a80647728ed2359ed48566f9a', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:21:57,620 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='1d9c3d9fa1f89f7995a0b8bb3b34fecfe', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:21:57,622 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='2cd21c2e034fc3f0127276053101cf37a', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:21:57,624 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='34ed2316caba79a49531508dce216ad92', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:21:57,626 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='44cc914fcb11e5a28e4b9486fbe24fc45', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:21:57,628 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='6a5e0ca74317dbf956dfc56062646fa76', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:21:57,628 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:21:57,629 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:21:57,629 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='54c6a13245262d4fb18ed1289e2b15866', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:21:57,633 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39476, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:21:57,634 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:37752, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:21:57,635 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39649 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:21:57,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:21:57,651 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T08:21:57,660 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:57,661 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:21:57,662 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:21:57,665 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T08:21:57,683 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T08:21:57,694 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T08:21:57,700 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T08:21:57,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350117700 (current time:1732350117700). 
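The two "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." entries correspond to test writes issued with durability SKIP_WAL, which trades crash safety for write speed. A hedged sketch of such a write (row, qualifier, and value are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalWrite {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tableName = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tableName)) {
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL triggers the "Data may be lost" notice seen above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }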
2024-11-23T08:21:57,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:21:57,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-23T08:21:57,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:21:57,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f7b15a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:57,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:21:57,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:21:57,703 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:21:57,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:21:57,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:21:57,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60e8cdc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:57,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:21:57,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:21:57,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:21:57,706 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53796, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:21:57,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30b4bcb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:57,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:21:57,710 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:21:57,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:21:57,712 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53604, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:21:57,713 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:21:57,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:21:57,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:21:57,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:21:57,715 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
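The snapshot being validated here, snaptb0-testExportFileSystemStateWithSplitRegion, is the one this test, judging by its name, later exports to a file system. A hypothetical export invocation via the ExportSnapshot tool is sketched below; the destination URI is an assumption, not taken from this log (the same tool can be run from the shell as "hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot ..."):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest and the hfiles it references to the target file system.
        int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSplitRegion",
            "-copy-to", "hdfs://backup-cluster:8020/hbase"  // hypothetical destination
        });
        System.exit(exitCode);
      }
    }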
2024-11-23T08:21:57,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f5a9932, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:57,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:21:57,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:21:57,721 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:21:57,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:21:57,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:21:57,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cc7c6c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:57,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:21:57,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:21:57,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:21:57,724 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53806, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:21:57,725 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a90850a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:21:57,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:21:57,726 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:21:57,727 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:21:57,728 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53614, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T08:21:57,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:21:57,731 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:21:57,732 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39484, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:21:57,734 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:21:57,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:21:57,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:21:57,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:21:57,734 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:21:57,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T08:21:57,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:21:57,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T08:21:57,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-23T08:21:57,739 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:21:57,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T08:21:57,740 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:21:57,744 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:21:57,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741849_1025 (size=210) 2024-11-23T08:21:57,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741849_1025 (size=210) 2024-11-23T08:21:57,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741849_1025 (size=210) 2024-11-23T08:21:57,756 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:21:57,756 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef5793d8f5c1e43ccf012959a4988eca}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de46fac4fb8b590f1d6ffea30a7de582}] 2024-11-23T08:21:57,758 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:57,758 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:57,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T08:21:57,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-11-23T08:21:57,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45199 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-11-23T08:21:57,912 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 2024-11-23T08:21:57,913 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 
2024-11-23T08:21:57,919 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing ef5793d8f5c1e43ccf012959a4988eca 1/1 column families, dataSize=534 B heapSize=1.38 KB 2024-11-23T08:21:57,919 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing de46fac4fb8b590f1d6ffea30a7de582 1/1 column families, dataSize=2.74 KB heapSize=6.16 KB 2024-11-23T08:21:58,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/.tmp/cf/0dd830557eb545559d66630805390ffe is 71, key is 00e393311d70689e4644dda786fbb9e0/cf:q/1732350117635/Put/seqid=0 2024-11-23T08:21:58,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/.tmp/cf/965b067889b54ec6aa0133301e35c2b9 is 71, key is 11779f7232a0b03326979c1462e1b711/cf:q/1732350117641/Put/seqid=0 2024-11-23T08:21:58,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741850_1026 (size=5634) 2024-11-23T08:21:58,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741850_1026 (size=5634) 2024-11-23T08:21:58,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741850_1026 (size=5634) 2024-11-23T08:21:58,040 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=534 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/.tmp/cf/0dd830557eb545559d66630805390ffe 2024-11-23T08:21:58,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741851_1027 (size=7984) 2024-11-23T08:21:58,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741851_1027 (size=7984) 2024-11-23T08:21:58,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741851_1027 (size=7984) 2024-11-23T08:21:58,048 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.74 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/.tmp/cf/965b067889b54ec6aa0133301e35c2b9 2024-11-23T08:21:58,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to 
see if procedure is done pid=15 2024-11-23T08:21:58,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/.tmp/cf/965b067889b54ec6aa0133301e35c2b9 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/cf/965b067889b54ec6aa0133301e35c2b9 2024-11-23T08:21:58,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/.tmp/cf/0dd830557eb545559d66630805390ffe as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/cf/0dd830557eb545559d66630805390ffe 2024-11-23T08:21:58,135 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/cf/965b067889b54ec6aa0133301e35c2b9, entries=42, sequenceid=6, filesize=7.8 K 2024-11-23T08:21:58,135 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/cf/0dd830557eb545559d66630805390ffe, entries=8, sequenceid=6, filesize=5.5 K 2024-11-23T08:21:58,144 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~2.74 KB/2802, heapSize ~6.14 KB/6288, currentSize=0 B/0 for de46fac4fb8b590f1d6ffea30a7de582 in 227ms, sequenceid=6, compaction requested=false 2024-11-23T08:21:58,144 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~534 B/534, heapSize ~1.36 KB/1392, currentSize=0 B/0 for ef5793d8f5c1e43ccf012959a4988eca in 225ms, sequenceid=6, compaction requested=false 2024-11-23T08:21:58,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-11-23T08:21:58,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-11-23T08:21:58,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for de46fac4fb8b590f1d6ffea30a7de582: 2024-11-23T08:21:58,145 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for ef5793d8f5c1e43ccf012959a4988eca: 2024-11-23T08:21:58,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-23T08:21:58,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-23T08:21:58,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:58,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:58,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:21:58,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:21:58,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/cf/0dd830557eb545559d66630805390ffe] hfiles 2024-11-23T08:21:58,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/cf/965b067889b54ec6aa0133301e35c2b9] hfiles 2024-11-23T08:21:58,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/cf/965b067889b54ec6aa0133301e35c2b9 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:58,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/cf/0dd830557eb545559d66630805390ffe for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:58,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741852_1028 (size=125) 2024-11-23T08:21:58,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741852_1028 (size=125) 2024-11-23T08:21:58,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741852_1028 (size=125) 2024-11-23T08:21:58,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:21:58,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-11-23T08:21:58,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-11-23T08:21:58,180 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:58,180 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:21:58,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ef5793d8f5c1e43ccf012959a4988eca in 426 msec 2024-11-23T08:21:58,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741853_1029 (size=125) 2024-11-23T08:21:58,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741853_1029 (size=125) 2024-11-23T08:21:58,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741853_1029 (size=125) 2024-11-23T08:21:58,194 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 
2024-11-23T08:21:58,195 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-23T08:21:58,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-11-23T08:21:58,196 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:58,196 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:21:58,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=15 2024-11-23T08:21:58,201 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:21:58,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure de46fac4fb8b590f1d6ffea30a7de582 in 441 msec 2024-11-23T08:21:58,202 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:21:58,204 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:21:58,204 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:58,206 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:58,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741854_1030 (size=675) 2024-11-23T08:21:58,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741854_1030 (size=675) 2024-11-23T08:21:58,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741854_1030 (size=675) 2024-11-23T08:21:58,231 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:21:58,243 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:21:58,244 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:58,247 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:21:58,247 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-23T08:21:58,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 511 msec 2024-11-23T08:21:58,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T08:21:58,368 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T08:21:58,395 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T08:21:58,396 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T08:21:58,397 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T08:21:58,397 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39500, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T08:21:58,398 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53616, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T08:21:58,399 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43417 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-23T08:21:58,399 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39649 
{}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-23T08:21:58,400 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37754, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T08:21:58,400 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45199 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-23T08:21:58,403 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:21:58,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T08:21:58,406 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:21:58,406 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:58,406 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-11-23T08:21:58,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T08:21:58,409 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:21:58,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741855_1031 (size=390) 2024-11-23T08:21:58,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741855_1031 (size=390) 2024-11-23T08:21:58,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741855_1031 (size=390) 2024-11-23T08:21:58,453 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d313bbbd59f37fba635a18a29ad0bf4b, NAME => 'testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:21:58,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T08:21:58,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741856_1032 (size=75) 2024-11-23T08:21:58,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741856_1032 (size=75) 2024-11-23T08:21:58,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741856_1032 (size=75) 2024-11-23T08:21:58,528 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:21:58,528 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing d313bbbd59f37fba635a18a29ad0bf4b, disabling compactions & flushes 2024-11-23T08:21:58,528 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. 2024-11-23T08:21:58,528 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. 2024-11-23T08:21:58,528 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. after waiting 0 ms 2024-11-23T08:21:58,528 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. 2024-11-23T08:21:58,528 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. 
2024-11-23T08:21:58,529 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for d313bbbd59f37fba635a18a29ad0bf4b: Waiting for close lock at 1732350118528Disabling compacts and flushes for region at 1732350118528Disabling writes for close at 1732350118528Writing region close event to WAL at 1732350118528Closed at 1732350118528 2024-11-23T08:21:58,531 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:21:58,532 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1732350118532"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350118532"}]},"ts":"1732350118532"} 2024-11-23T08:21:58,538 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-23T08:21:58,541 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:21:58,541 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350118541"}]},"ts":"1732350118541"} 2024-11-23T08:21:58,544 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-11-23T08:21:58,545 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:21:58,546 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:21:58,546 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:21:58,546 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:21:58,546 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:21:58,546 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:21:58,546 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:21:58,546 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:21:58,547 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:21:58,547 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:21:58,547 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:21:58,547 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d313bbbd59f37fba635a18a29ad0bf4b, ASSIGN}] 2024-11-23T08:21:58,549 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d313bbbd59f37fba635a18a29ad0bf4b, ASSIGN 2024-11-23T08:21:58,551 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d313bbbd59f37fba635a18a29ad0bf4b, ASSIGN; state=OFFLINE, location=b33d9013c60a,45199,1732350106787; forceNewPlan=false, retain=false 2024-11-23T08:21:58,702 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-23T08:21:58,702 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=d313bbbd59f37fba635a18a29ad0bf4b, regionState=OPENING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:21:58,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d313bbbd59f37fba635a18a29ad0bf4b, ASSIGN because future has completed 2024-11-23T08:21:58,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure d313bbbd59f37fba635a18a29ad0bf4b, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:21:58,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T08:21:58,868 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. 2024-11-23T08:21:58,868 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => d313bbbd59f37fba635a18a29ad0bf4b, NAME => 'testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b.', STARTKEY => '', ENDKEY => ''} 2024-11-23T08:21:58,869 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. service=AccessControlService 2024-11-23T08:21:58,869 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:21:58,869 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:21:58,869 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:21:58,869 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:21:58,869 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:21:58,884 INFO [StoreOpener-d313bbbd59f37fba635a18a29ad0bf4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:21:58,945 INFO [StoreOpener-d313bbbd59f37fba635a18a29ad0bf4b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d313bbbd59f37fba635a18a29ad0bf4b columnFamilyName cf 2024-11-23T08:21:58,945 DEBUG [StoreOpener-d313bbbd59f37fba635a18a29ad0bf4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:21:58,948 INFO [StoreOpener-d313bbbd59f37fba635a18a29ad0bf4b-1 {}] regionserver.HStore(327): Store=d313bbbd59f37fba635a18a29ad0bf4b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:21:58,950 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:21:58,952 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:21:58,953 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:21:58,954 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:21:58,954 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:21:58,958 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:21:58,968 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:21:58,970 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened d313bbbd59f37fba635a18a29ad0bf4b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71440380, jitterRate=0.06454461812973022}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:21:58,970 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:21:58,973 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for d313bbbd59f37fba635a18a29ad0bf4b: Running coprocessor pre-open hook at 1732350118870Writing region info on filesystem at 1732350118870Initializing all the Stores at 1732350118872 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350118872Cleaning up temporary data from old regions at 1732350118954 (+82 ms)Running coprocessor post-open hooks at 1732350118970 (+16 ms)Region opened successfully at 1732350118973 (+3 ms) 2024-11-23T08:21:58,978 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b., pid=20, masterSystemTime=1732350118861 2024-11-23T08:21:58,982 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. 2024-11-23T08:21:58,983 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. 
2024-11-23T08:21:58,985 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=d313bbbd59f37fba635a18a29ad0bf4b, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:21:58,989 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=b33d9013c60a,45199,1732350106787, table=testExportFileSystemStateWithSplitRegion, region=d313bbbd59f37fba635a18a29ad0bf4b. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-23T08:21:58,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure d313bbbd59f37fba635a18a29ad0bf4b, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:21:59,010 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-11-23T08:21:59,010 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure d313bbbd59f37fba635a18a29ad0bf4b, server=b33d9013c60a,45199,1732350106787 in 294 msec 2024-11-23T08:21:59,015 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-23T08:21:59,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d313bbbd59f37fba635a18a29ad0bf4b, ASSIGN in 463 msec 2024-11-23T08:21:59,018 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:21:59,018 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350119018"}]},"ts":"1732350119018"} 2024-11-23T08:21:59,023 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-11-23T08:21:59,025 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:21:59,026 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-11-23T08:21:59,034 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T08:21:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T08:21:59,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:21:59,088 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:21:59,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:21:59,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:21:59,099 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:21:59,099 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:21:59,100 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:21:59,100 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:21:59,100 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:21:59,100 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:21:59,101 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:21:59,101 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 694 msec 2024-11-23T08:21:59,101 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:21:59,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T08:21:59,548 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T08:21:59,549 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T08:21:59,552 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T08:22:01,747 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:22:01,915 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-11-23T08:22:02,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741857_1033 (size=134217728) 2024-11-23T08:22:02,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741857_1033 (size=134217728) 2024-11-23T08:22:02,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741857_1033 (size=134217728) 2024-11-23T08:22:05,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741858_1034 (size=134217728) 2024-11-23T08:22:05,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741858_1034 (size=134217728) 2024-11-23T08:22:05,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741858_1034 (size=134217728) 2024-11-23T08:22:05,541 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1732350119558/Put/seqid=0 2024-11-23T08:22:05,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741859_1035 (size=51979256) 2024-11-23T08:22:05,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741859_1035 (size=51979256) 2024-11-23T08:22:05,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741859_1035 (size=51979256) 2024-11-23T08:22:05,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f7f6678, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:22:05,552 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:22:05,553 DEBUG 
[Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:22:05,556 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:22:05,557 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:22:05,557 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:22:05,557 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fe750b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:22:05,557 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:22:05,558 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:22:05,558 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:22:05,568 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39840, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:22:05,575 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@511d5060, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:22:05,576 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:22:05,578 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:22:05,578 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:22:05,599 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59048, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:22:05,640 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:34105/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 
2024-11-23T08:22:05,641 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T08:22:05,643 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncConnectionImpl(321): The fetched master address is b33d9013c60a,38501,1732350105792 2024-11-23T08:22:05,643 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@bf83888 2024-11-23T08:22:05,643 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T08:22:05,646 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39850, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T08:22:05,654 WARN [IPC Server handler 3 on default port 34105 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-11-23T08:22:05,663 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:22:05,668 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:22:05,674 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58774, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:22:05,679 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T08:22:05,707 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:34105/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-11-23T08:22:05,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:22:05,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:22:05,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:22:05,743 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41447, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-23T08:22:05,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43417 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at 
org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-23T08:22:05,748 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43417 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:41447 deadline: 1732350185743, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-11-23T08:22:05,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T08:22:05,758 WARN [IPC Server handler 3 on default port 34105 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-11-23T08:22:05,987 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34105/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/output/cf/test_file for inclusion in d313bbbd59f37fba635a18a29ad0bf4b/cf 2024-11-23T08:22:05,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-11-23T08:22:05,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-23T08:22:05,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:34105/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-11-23T08:22:05,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HRegion(2603): Flush status journal for d313bbbd59f37fba635a18a29ad0bf4b: 2024-11-23T08:22:06,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:34105/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/output/cf/test_file to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/staging/jenkins__testExportFileSystemStateWithSplitRegion__2dsmb6pdcq7esj20gn2n4m2ba4qpmgg0h1qsknv65ubdqf7r00ilpha1hj4ug9j3/cf/test_file 2024-11-23T08:22:06,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/staging/jenkins__testExportFileSystemStateWithSplitRegion__2dsmb6pdcq7esj20gn2n4m2ba4qpmgg0h1qsknv65ubdqf7r00ilpha1hj4ug9j3/cf/test_file as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_ 2024-11-23T08:22:06,009 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/staging/jenkins__testExportFileSystemStateWithSplitRegion__2dsmb6pdcq7esj20gn2n4m2ba4qpmgg0h1qsknv65ubdqf7r00ilpha1hj4ug9j3/cf/test_file into d313bbbd59f37fba635a18a29ad0bf4b/cf as 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_ - updating store file list. 2024-11-23T08:22:06,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 53022b966121430ab7e1382c65d8e3c2_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-23T08:22:06,024 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_ into d313bbbd59f37fba635a18a29ad0bf4b/cf 2024-11-23T08:22:06,024 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/staging/jenkins__testExportFileSystemStateWithSplitRegion__2dsmb6pdcq7esj20gn2n4m2ba4qpmgg0h1qsknv65ubdqf7r00ilpha1hj4ug9j3/cf/test_file into d313bbbd59f37fba635a18a29ad0bf4b/cf (new location: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_) 2024-11-23T08:22:06,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/staging/jenkins__testExportFileSystemStateWithSplitRegion__2dsmb6pdcq7esj20gn2n4m2ba4qpmgg0h1qsknv65ubdqf7r00ilpha1hj4ug9j3/cf/test_file 2024-11-23T08:22:06,038 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T08:22:06,038 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T08:22:06,038 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:22:06,039 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T08:22:06,039 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:22:06,040 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T08:22:06,041 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b., hostname=b33d9013c60a,45199,1732350106787, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b., hostname=b33d9013c60a,45199,1732350106787, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=b33d9013c60a:45199 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-23T08:22:06,042 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b., hostname=b33d9013c60a,45199,1732350106787, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-23T08:22:06,042 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b., hostname=b33d9013c60a,45199,1732350106787, seqNum=2 from cache 2024-11-23T08:22:06,043 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] util.NettyFutureUtils(65): IO operation failed org.apache.hbase.thirdparty.io.netty.channel.StacklessClosedChannelException: null at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe.write(Object, ChannelPromise)(Unknown Source) ~[hbase-shaded-netty-4.1.9.jar:?] 2024-11-23T08:22:06,049 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:22:06,061 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. 
2024-11-23T08:22:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=b33d9013c60a,45199,1732350106787 2024-11-23T08:22:06,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d313bbbd59f37fba635a18a29ad0bf4b, daughterA=9611d4d63053ea5e9af2e619420f3eca, daughterB=ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:06,077 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d313bbbd59f37fba635a18a29ad0bf4b, daughterA=9611d4d63053ea5e9af2e619420f3eca, daughterB=ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:06,077 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d313bbbd59f37fba635a18a29ad0bf4b, daughterA=9611d4d63053ea5e9af2e619420f3eca, daughterB=ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:06,077 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d313bbbd59f37fba635a18a29ad0bf4b, daughterA=9611d4d63053ea5e9af2e619420f3eca, daughterB=ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:06,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T08:22:06,090 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d313bbbd59f37fba635a18a29ad0bf4b, UNASSIGN}] 2024-11-23T08:22:06,108 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d313bbbd59f37fba635a18a29ad0bf4b, UNASSIGN 2024-11-23T08:22:06,110 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=d313bbbd59f37fba635a18a29ad0bf4b, regionState=CLOSING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:22:06,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d313bbbd59f37fba635a18a29ad0bf4b, UNASSIGN because future has completed 2024-11-23T08:22:06,116 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-23T08:22:06,116 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure d313bbbd59f37fba635a18a29ad0bf4b, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:22:06,148 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=b33d9013c60a:43417 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 34 more 2024-11-23T08:22:06,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T08:22:06,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-11-23T08:22:06,247 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-11-23T08:22:06,249 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:22:06,249 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-11-23T08:22:06,289 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:22:06,291 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-23T08:22:06,298 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing d313bbbd59f37fba635a18a29ad0bf4b, disabling compactions & flushes 2024-11-23T08:22:06,298 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. 2024-11-23T08:22:06,299 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. 2024-11-23T08:22:06,299 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. 
after waiting 0 ms 2024-11-23T08:22:06,299 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. 2024-11-23T08:22:06,336 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-11-23T08:22:06,341 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:22:06,341 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b. 2024-11-23T08:22:06,341 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for d313bbbd59f37fba635a18a29ad0bf4b: Waiting for close lock at 1732350126298Running coprocessor pre-close hooks at 1732350126298Disabling compacts and flushes for region at 1732350126298Disabling writes for close at 1732350126299 (+1 ms)Writing region close event to WAL at 1732350126328 (+29 ms)Running coprocessor post-close hooks at 1732350126337 (+9 ms)Closed at 1732350126341 (+4 ms) 2024-11-23T08:22:06,346 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:22:06,348 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=d313bbbd59f37fba635a18a29ad0bf4b, regionState=CLOSED 2024-11-23T08:22:06,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure d313bbbd59f37fba635a18a29ad0bf4b, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:22:06,371 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-11-23T08:22:06,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure d313bbbd59f37fba635a18a29ad0bf4b, server=b33d9013c60a,45199,1732350106787 in 239 msec 2024-11-23T08:22:06,377 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-11-23T08:22:06,377 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d313bbbd59f37fba635a18a29ad0bf4b, UNASSIGN in 282 msec 2024-11-23T08:22:06,394 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:22:06,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T08:22:06,398 INFO 
[PEWorker-1 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=d313bbbd59f37fba635a18a29ad0bf4b, threads=1 2024-11-23T08:22:06,402 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_ for region: d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:22:06,426 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 53022b966121430ab7e1382c65d8e3c2_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-23T08:22:06,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741860_1036 (size=21) 2024-11-23T08:22:06,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741860_1036 (size=21) 2024-11-23T08:22:06,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741860_1036 (size=21) 2024-11-23T08:22:06,474 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 53022b966121430ab7e1382c65d8e3c2_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-23T08:22:06,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741861_1037 (size=21) 2024-11-23T08:22:06,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741861_1037 (size=21) 2024-11-23T08:22:06,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741861_1037 (size=21) 2024-11-23T08:22:06,486 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_ for region: d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:22:06,490 DEBUG [PEWorker-1 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region d313bbbd59f37fba635a18a29ad0bf4b Daughter A: [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b] storefiles, Daughter B: [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b] storefiles. 
2024-11-23T08:22:06,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741862_1038 (size=76) 2024-11-23T08:22:06,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741862_1038 (size=76) 2024-11-23T08:22:06,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741862_1038 (size=76) 2024-11-23T08:22:06,549 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:22:06,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741863_1039 (size=76) 2024-11-23T08:22:06,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741863_1039 (size=76) 2024-11-23T08:22:06,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741863_1039 (size=76) 2024-11-23T08:22:06,628 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:22:06,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T08:22:06,758 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-11-23T08:22:06,763 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-11-23T08:22:06,769 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1732350126768"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1732350126768"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1732350126768"}]},"ts":"1732350126768"} 2024-11-23T08:22:06,769 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732350126768"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350126768"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732350126768"}]},"ts":"1732350126768"} 2024-11-23T08:22:06,769 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732350126768"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350126768"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732350126768"}]},"ts":"1732350126768"} 2024-11-23T08:22:06,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=9611d4d63053ea5e9af2e619420f3eca, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ebeda0ea67c69d7eab974f2255a55d4f, ASSIGN}] 2024-11-23T08:22:06,800 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ebeda0ea67c69d7eab974f2255a55d4f, ASSIGN 2024-11-23T08:22:06,800 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=9611d4d63053ea5e9af2e619420f3eca, ASSIGN 2024-11-23T08:22:06,802 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ebeda0ea67c69d7eab974f2255a55d4f, ASSIGN; state=SPLITTING_NEW, location=b33d9013c60a,45199,1732350106787; forceNewPlan=false, retain=false 2024-11-23T08:22:06,809 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=9611d4d63053ea5e9af2e619420f3eca, ASSIGN; state=SPLITTING_NEW, location=b33d9013c60a,45199,1732350106787; forceNewPlan=false, retain=false 2024-11-23T08:22:06,953 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
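Annotation: with the reference files in place, the master schedules TransitRegionStateProcedure ASSIGN work (pid=24/25) for the two daughter regions. As an illustrative, hedged sketch, a client could confirm that both daughters are online once the split finishes; this listing call is an assumption about how one might check, not something this test log shows.

// Hedged sketch: listing the regions of the table after the split to confirm
// that the two daughter regions (start keys "" and "5") are assigned.
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class ListDaughterRegionsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      List<RegionInfo> regions =
          admin.getRegions(TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
      for (RegionInfo ri : regions) {
        // Expect two regions once the ASSIGN procedures above have finished.
        System.out.println(ri.getEncodedName() + " start=" + Bytes.toStringBinary(ri.getStartKey()));
      }
    }
  }
}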
2024-11-23T08:22:06,954 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=ebeda0ea67c69d7eab974f2255a55d4f, regionState=OPENING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:22:06,954 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=9611d4d63053ea5e9af2e619420f3eca, regionState=OPENING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:22:06,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ebeda0ea67c69d7eab974f2255a55d4f, ASSIGN because future has completed 2024-11-23T08:22:06,962 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure ebeda0ea67c69d7eab974f2255a55d4f, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:22:06,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=9611d4d63053ea5e9af2e619420f3eca, ASSIGN because future has completed 2024-11-23T08:22:06,966 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9611d4d63053ea5e9af2e619420f3eca, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:22:07,122 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca. 2024-11-23T08:22:07,123 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => 9611d4d63053ea5e9af2e619420f3eca, NAME => 'testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca.', STARTKEY => '', ENDKEY => '5'} 2024-11-23T08:22:07,123 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca. service=AccessControlService 2024-11-23T08:22:07,124 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:22:07,124 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:07,124 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:22:07,124 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:07,124 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:07,127 INFO [StoreOpener-9611d4d63053ea5e9af2e619420f3eca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:07,129 INFO [StoreOpener-9611d4d63053ea5e9af2e619420f3eca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9611d4d63053ea5e9af2e619420f3eca columnFamilyName cf 2024-11-23T08:22:07,129 DEBUG [StoreOpener-9611d4d63053ea5e9af2e619420f3eca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:22:07,153 DEBUG [StoreFileOpener-9611d4d63053ea5e9af2e619420f3eca-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b: NONE, but ROW specified in column family configuration 2024-11-23T08:22:07,181 DEBUG [StoreOpener-9611d4d63053ea5e9af2e619420f3eca-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b->hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_-bottom 2024-11-23T08:22:07,182 INFO [StoreOpener-9611d4d63053ea5e9af2e619420f3eca-1 {}] regionserver.HStore(327): Store=9611d4d63053ea5e9af2e619420f3eca/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:22:07,183 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:07,184 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:07,187 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:07,188 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:07,188 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:07,191 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:07,193 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened 9611d4d63053ea5e9af2e619420f3eca; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58769071, jitterRate=-0.12427259981632233}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:22:07,193 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:07,194 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for 9611d4d63053ea5e9af2e619420f3eca: Running coprocessor pre-open hook at 1732350127124Writing region info on filesystem at 1732350127124Initializing all the Stores at 1732350127126 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350127127 (+1 ms)Cleaning up temporary data from old regions at 1732350127188 (+61 ms)Running coprocessor post-open hooks at 1732350127193 (+5 ms)Region opened successfully at 1732350127194 (+1 ms) 2024-11-23T08:22:07,195 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca., pid=27, masterSystemTime=1732350127117 2024-11-23T08:22:07,196 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] 
regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca.,because compaction is disabled. 2024-11-23T08:22:07,200 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca. 2024-11-23T08:22:07,200 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca. 2024-11-23T08:22:07,200 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f. 2024-11-23T08:22:07,201 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=9611d4d63053ea5e9af2e619420f3eca, regionState=OPEN, openSeqNum=7, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:22:07,201 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => ebeda0ea67c69d7eab974f2255a55d4f, NAME => 'testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f.', STARTKEY => '5', ENDKEY => ''} 2024-11-23T08:22:07,201 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f. service=AccessControlService 2024-11-23T08:22:07,201 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
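Annotation: the repeated "HFile Bloom filter type ... NONE, but ROW specified in column family configuration" messages arise because the bulk-loaded HFile carries no Bloom filter while the column family descriptor printed in the region open journal above (BLOOMFILTER => 'ROW', VERSIONS => '1', BLOCKSIZE => 64KB, no compression) asks for ROW Bloom filters. Below is a hedged sketch of declaring such a column family at table creation time; the creation call itself is an assumption, only the attribute values are taken from the log.

// Hedged sketch: creating a table whose 'cf' family matches the descriptor
// printed in the region open journal above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setBloomFilterType(BloomType.ROW)  // ROW Bloom filter, as in the logged descriptor
          .setMaxVersions(1)                  // VERSIONS => '1'
          .setBlocksize(64 * 1024)            // BLOCKSIZE => 64KB
          .build());
      admin.createTable(table.build());
    }
  }
}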
2024-11-23T08:22:07,201 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:07,202 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:22:07,202 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:07,202 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:07,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9611d4d63053ea5e9af2e619420f3eca, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:22:07,212 INFO [StoreOpener-ebeda0ea67c69d7eab974f2255a55d4f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:07,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=24 2024-11-23T08:22:07,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure 9611d4d63053ea5e9af2e619420f3eca, server=b33d9013c60a,45199,1732350106787 in 240 msec 2024-11-23T08:22:07,217 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=9611d4d63053ea5e9af2e619420f3eca, ASSIGN in 417 msec 2024-11-23T08:22:07,217 INFO [StoreOpener-ebeda0ea67c69d7eab974f2255a55d4f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ebeda0ea67c69d7eab974f2255a55d4f columnFamilyName cf 2024-11-23T08:22:07,217 DEBUG [StoreOpener-ebeda0ea67c69d7eab974f2255a55d4f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:22:07,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking 
to see if procedure is done pid=21 2024-11-23T08:22:07,249 DEBUG [StoreFileOpener-ebeda0ea67c69d7eab974f2255a55d4f-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b: NONE, but ROW specified in column family configuration 2024-11-23T08:22:07,257 DEBUG [StoreOpener-ebeda0ea67c69d7eab974f2255a55d4f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b->hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_-top 2024-11-23T08:22:07,258 INFO [StoreOpener-ebeda0ea67c69d7eab974f2255a55d4f-1 {}] regionserver.HStore(327): Store=ebeda0ea67c69d7eab974f2255a55d4f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:22:07,259 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:07,260 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:07,262 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:07,263 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:07,263 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:07,265 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:07,267 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened ebeda0ea67c69d7eab974f2255a55d4f; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64913060, jitterRate=-0.03272002935409546}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:22:07,268 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:07,268 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for ebeda0ea67c69d7eab974f2255a55d4f: Running 
coprocessor pre-open hook at 1732350127202Writing region info on filesystem at 1732350127202Initializing all the Stores at 1732350127204 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350127204Cleaning up temporary data from old regions at 1732350127263 (+59 ms)Running coprocessor post-open hooks at 1732350127268 (+5 ms)Region opened successfully at 1732350127268 2024-11-23T08:22:07,270 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f., pid=26, masterSystemTime=1732350127117 2024-11-23T08:22:07,270 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f.,because compaction is disabled. 2024-11-23T08:22:07,280 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=ebeda0ea67c69d7eab974f2255a55d4f, regionState=OPEN, openSeqNum=7, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:22:07,281 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f. 2024-11-23T08:22:07,281 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f. 
2024-11-23T08:22:07,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure ebeda0ea67c69d7eab974f2255a55d4f, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:22:07,300 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=25 2024-11-23T08:22:07,300 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure ebeda0ea67c69d7eab974f2255a55d4f, server=b33d9013c60a,45199,1732350106787 in 332 msec 2024-11-23T08:22:07,311 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=21 2024-11-23T08:22:07,311 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ebeda0ea67c69d7eab974f2255a55d4f, ASSIGN in 505 msec 2024-11-23T08:22:07,315 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d313bbbd59f37fba635a18a29ad0bf4b, daughterA=9611d4d63053ea5e9af2e619420f3eca, daughterB=ebeda0ea67c69d7eab974f2255a55d4f in 1.2400 sec 2024-11-23T08:22:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T08:22:08,238 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T08:22:08,238 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T08:22:08,245 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T08:22:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350128245 (current time:1732350128245). 
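Annotation: the SPLIT_REGION operation completes here and the client immediately requests a FLUSH-type snapshot named snapshot-testExportFileSystemStateWithSplitRegion. A hedged sketch of issuing the same request through the Admin API follows; the surrounding connection code is an assumption.

// Hedged sketch: taking the FLUSH snapshot requested in the log through the
// synchronous Admin API (the master runs it as SnapshotProcedure pid=28 below).
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the snapshot procedure completes on the master.
      admin.snapshot("snapshot-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
    }
  }
}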
2024-11-23T08:22:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:22:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-23T08:22:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:22:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72ba979a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:22:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:22:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:22:08,247 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:22:08,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:22:08,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:22:08,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cd8a44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:22:08,248 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:22:08,248 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:22:08,248 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:22:08,249 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39860, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:22:08,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29d21656, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:22:08,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:22:08,251 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:22:08,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:22:08,253 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59054, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:22:08,254 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:22:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:22:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:22:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:22:08,255 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T08:22:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6285e45b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:22:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:22:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:22:08,256 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:22:08,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:22:08,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:22:08,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@947fe7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:22:08,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:22:08,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:22:08,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:22:08,259 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39870, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:22:08,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27fd43ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:22:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:22:08,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:22:08,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:22:08,263 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59066, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T08:22:08,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:22:08,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:22:08,267 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50356, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:22:08,269 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:22:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:22:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:22:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:22:08,270 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:22:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T08:22:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:22:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T08:22:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-11-23T08:22:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-23T08:22:08,275 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:22:08,276 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:22:08,280 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:22:08,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741864_1040 (size=197) 2024-11-23T08:22:08,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741864_1040 (size=197) 2024-11-23T08:22:08,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741864_1040 (size=197) 2024-11-23T08:22:08,292 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:22:08,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9611d4d63053ea5e9af2e619420f3eca}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebeda0ea67c69d7eab974f2255a55d4f}] 2024-11-23T08:22:08,294 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:08,294 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:08,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-23T08:22:08,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45199 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-11-23T08:22:08,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45199 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-11-23T08:22:08,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f. 2024-11-23T08:22:08,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca. 2024-11-23T08:22:08,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for ebeda0ea67c69d7eab974f2255a55d4f: 2024-11-23T08:22:08,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-11-23T08:22:08,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for 9611d4d63053ea5e9af2e619420f3eca: 2024-11-23T08:22:08,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca. for snapshot-testExportFileSystemStateWithSplitRegion completed. 
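Annotation: the per-region snapshot callables (pid=29/30) report completion here; manifest consolidation and verification follow further down in the log. As an illustrative, hedged sketch, a client could afterwards confirm the finished snapshot is visible; the listing call is an assumption about how one might verify this, not something this log shows.

// Hedged sketch: verifying that the completed snapshot is visible to clients.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      for (SnapshotDescription sd : admin.listSnapshots()) {
        // Expect "snapshot-testExportFileSystemStateWithSplitRegion" once pid=28 finishes.
        System.out.println(sd.getName() + " table=" + sd.getTableNameAsString());
      }
    }
  }
}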
2024-11-23T08:22:08,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T08:22:08,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T08:22:08,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:22:08,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:22:08,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b->hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_-top] hfiles 2024-11-23T08:22:08,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b->hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_-bottom] hfiles 2024-11-23T08:22:08,451 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T08:22:08,451 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T08:22:08,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741866_1042 
(size=182) 2024-11-23T08:22:08,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741865_1041 (size=182) 2024-11-23T08:22:08,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca. 2024-11-23T08:22:08,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-23T08:22:08,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-11-23T08:22:08,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741865_1041 (size=182) 2024-11-23T08:22:08,467 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:08,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f. 2024-11-23T08:22:08,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-11-23T08:22:08,468 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:22:08,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741866_1042 (size=182) 2024-11-23T08:22:08,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741865_1041 (size=182) 2024-11-23T08:22:08,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741866_1042 (size=182) 2024-11-23T08:22:08,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-11-23T08:22:08,468 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:08,468 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:22:08,470 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9611d4d63053ea5e9af2e619420f3eca in 177 msec 2024-11-23T08:22:08,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=28 2024-11-23T08:22:08,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; 
SnapshotRegionProcedure ebeda0ea67c69d7eab974f2255a55d4f in 177 msec 2024-11-23T08:22:08,472 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:22:08,473 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-11-23T08:22:08,473 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T08:22:08,473 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:22:08,475 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_] hfiles 2024-11-23T08:22:08,475 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_ 2024-11-23T08:22:08,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741867_1043 (size=129) 2024-11-23T08:22:08,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741867_1043 (size=129) 2024-11-23T08:22:08,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741867_1043 (size=129) 2024-11-23T08:22:08,487 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => d313bbbd59f37fba635a18a29ad0bf4b, NAME => 'testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-11-23T08:22:08,491 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:22:08,492 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:22:08,492 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T08:22:08,494 DEBUG [PEWorker-1 {}] 
snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T08:22:08,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741868_1044 (size=891) 2024-11-23T08:22:08,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741868_1044 (size=891) 2024-11-23T08:22:08,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741868_1044 (size=891) 2024-11-23T08:22:08,537 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:22:08,545 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:22:08,545 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T08:22:08,547 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:22:08,548 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-11-23T08:22:08,549 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 277 msec 2024-11-23T08:22:08,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-23T08:22:08,588 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T08:22:08,588 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350128588 2024-11-23T08:22:08,588 INFO 
[Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34105, tgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350128588, rawTgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350128588, srcFsUri=hdfs://localhost:34105, srcDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:22:08,631 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34105, inputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:22:08,631 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350128588, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350128588/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T08:22:08,635 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T08:22:08,640 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350128588/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T08:22:08,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741869_1045 (size=197) 2024-11-23T08:22:08,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741869_1045 (size=197) 2024-11-23T08:22:08,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741870_1046 (size=891) 2024-11-23T08:22:08,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741870_1046 (size=891) 2024-11-23T08:22:08,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741870_1046 (size=891) 2024-11-23T08:22:08,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741869_1045 (size=197) 2024-11-23T08:22:08,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:22:08,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:22:08,671 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:22:09,640 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-10255545080268148642.jar 2024-11-23T08:22:09,640 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:22:09,640 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:22:09,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-17457631974271388582.jar 2024-11-23T08:22:09,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:22:09,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:22:09,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:22:09,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:22:09,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:22:09,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:22:09,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T08:22:09,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T08:22:09,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T08:22:09,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T08:22:09,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T08:22:09,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T08:22:09,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T08:22:09,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T08:22:09,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T08:22:09,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T08:22:09,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T08:22:09,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:22:09,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-23T08:22:09,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:22:09,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:22:09,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:22:09,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:22:09,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:22:09,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741871_1047 (size=131440) 2024-11-23T08:22:09,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741871_1047 (size=131440) 2024-11-23T08:22:09,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741871_1047 (size=131440) 2024-11-23T08:22:09,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741872_1048 (size=4188619) 2024-11-23T08:22:09,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741872_1048 (size=4188619) 2024-11-23T08:22:09,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741872_1048 (size=4188619) 2024-11-23T08:22:09,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741873_1049 (size=1323991) 2024-11-23T08:22:09,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741873_1049 (size=1323991) 2024-11-23T08:22:09,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741873_1049 (size=1323991) 2024-11-23T08:22:09,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741874_1050 (size=903733) 2024-11-23T08:22:09,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741874_1050 (size=903733) 
2024-11-23T08:22:09,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741874_1050 (size=903733) 2024-11-23T08:22:09,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741875_1051 (size=8360083) 2024-11-23T08:22:09,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741875_1051 (size=8360083) 2024-11-23T08:22:09,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741875_1051 (size=8360083) 2024-11-23T08:22:09,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741876_1052 (size=1877034) 2024-11-23T08:22:09,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741876_1052 (size=1877034) 2024-11-23T08:22:09,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741876_1052 (size=1877034) 2024-11-23T08:22:09,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741877_1053 (size=77835) 2024-11-23T08:22:09,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741877_1053 (size=77835) 2024-11-23T08:22:09,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741877_1053 (size=77835) 2024-11-23T08:22:09,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741878_1054 (size=30949) 2024-11-23T08:22:09,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741878_1054 (size=30949) 2024-11-23T08:22:09,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741878_1054 (size=30949) 2024-11-23T08:22:09,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741879_1055 (size=1597347) 2024-11-23T08:22:09,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741879_1055 (size=1597347) 2024-11-23T08:22:09,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741879_1055 (size=1597347) 2024-11-23T08:22:10,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741880_1056 (size=6424742) 2024-11-23T08:22:10,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741880_1056 (size=6424742) 2024-11-23T08:22:10,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741880_1056 (size=6424742) 2024-11-23T08:22:10,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741881_1057 
(size=4695811) 2024-11-23T08:22:10,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741881_1057 (size=4695811) 2024-11-23T08:22:10,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741881_1057 (size=4695811) 2024-11-23T08:22:10,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741882_1058 (size=232957) 2024-11-23T08:22:10,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741882_1058 (size=232957) 2024-11-23T08:22:10,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741882_1058 (size=232957) 2024-11-23T08:22:10,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741883_1059 (size=127628) 2024-11-23T08:22:10,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741883_1059 (size=127628) 2024-11-23T08:22:10,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741883_1059 (size=127628) 2024-11-23T08:22:10,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741884_1060 (size=20406) 2024-11-23T08:22:10,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741884_1060 (size=20406) 2024-11-23T08:22:10,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741884_1060 (size=20406) 2024-11-23T08:22:10,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741885_1061 (size=5175431) 2024-11-23T08:22:10,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741885_1061 (size=5175431) 2024-11-23T08:22:10,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741885_1061 (size=5175431) 2024-11-23T08:22:10,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741886_1062 (size=217634) 2024-11-23T08:22:10,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741886_1062 (size=217634) 2024-11-23T08:22:10,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741886_1062 (size=217634) 2024-11-23T08:22:10,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741887_1063 (size=1832290) 2024-11-23T08:22:10,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741887_1063 (size=1832290) 2024-11-23T08:22:10,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to 
blk_1073741887_1063 (size=1832290) 2024-11-23T08:22:10,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741888_1064 (size=322274) 2024-11-23T08:22:10,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741888_1064 (size=322274) 2024-11-23T08:22:10,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741888_1064 (size=322274) 2024-11-23T08:22:10,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741889_1065 (size=503880) 2024-11-23T08:22:10,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741889_1065 (size=503880) 2024-11-23T08:22:10,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741889_1065 (size=503880) 2024-11-23T08:22:10,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741890_1066 (size=29229) 2024-11-23T08:22:10,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741890_1066 (size=29229) 2024-11-23T08:22:10,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741890_1066 (size=29229) 2024-11-23T08:22:10,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741891_1067 (size=24096) 2024-11-23T08:22:10,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741891_1067 (size=24096) 2024-11-23T08:22:10,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741891_1067 (size=24096) 2024-11-23T08:22:10,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741892_1068 (size=111872) 2024-11-23T08:22:10,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741892_1068 (size=111872) 2024-11-23T08:22:10,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741892_1068 (size=111872) 2024-11-23T08:22:10,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741893_1069 (size=440957) 2024-11-23T08:22:10,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741893_1069 (size=440957) 2024-11-23T08:22:10,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741893_1069 (size=440957) 2024-11-23T08:22:10,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741894_1070 (size=45609) 2024-11-23T08:22:10,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to 
blk_1073741894_1070 (size=45609) 2024-11-23T08:22:10,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741894_1070 (size=45609) 2024-11-23T08:22:10,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741895_1071 (size=136454) 2024-11-23T08:22:10,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741895_1071 (size=136454) 2024-11-23T08:22:10,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741895_1071 (size=136454) 2024-11-23T08:22:10,231 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T08:22:10,235 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-11-23T08:22:10,242 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=d313bbbd59f37fba635a18a29ad0bf4b-53022b966121430ab7e1382c65d8e3c2_SeqId_4_. 2024-11-23T08:22:10,242 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=d313bbbd59f37fba635a18a29ad0bf4b-53022b966121430ab7e1382c65d8e3c2_SeqId_4_. 2024-11-23T08:22:10,243 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-11-23T08:22:10,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741896_1072 (size=244) 2024-11-23T08:22:10,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741896_1072 (size=244) 2024-11-23T08:22:10,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741896_1072 (size=244) 2024-11-23T08:22:10,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741897_1073 (size=17) 2024-11-23T08:22:10,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741897_1073 (size=17) 2024-11-23T08:22:10,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741897_1073 (size=17) 2024-11-23T08:22:10,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741898_1074 (size=304058) 2024-11-23T08:22:10,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741898_1074 (size=304058) 2024-11-23T08:22:10,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741898_1074 (size=304058) 2024-11-23T08:22:10,722 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T08:22:10,722 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T08:22:11,227 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0001_000001 (auth:SIMPLE) from 127.0.0.1:52198 2024-11-23T08:22:11,844 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:22:14,706 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T08:22:17,431 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0001_000001 (auth:SIMPLE) from 127.0.0.1:60830 2024-11-23T08:22:17,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741899_1075 (size=349756) 2024-11-23T08:22:17,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741899_1075 (size=349756) 2024-11-23T08:22:17,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741899_1075 (size=349756) 2024-11-23T08:22:19,674 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0001_000001 (auth:SIMPLE) from 127.0.0.1:34274 2024-11-23T08:22:32,021 INFO [master/b33d9013c60a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T08:22:32,021 INFO [master/b33d9013c60a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-23T08:22:41,235 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ef5793d8f5c1e43ccf012959a4988eca, had cached 0 bytes from a total of 5634 2024-11-23T08:22:41,285 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region de46fac4fb8b590f1d6ffea30a7de582, had cached 0 bytes from a total of 7984 2024-11-23T08:22:44,706 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T08:22:50,373 WARN [DataXceiver for client DFSClient_attempt_1732350113698_0001_m_000000_0_1936578704_1 at /127.0.0.1:39396 [Receiving block BP-1830007029-172.17.0.2-1732350100447:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1512ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4/, blockId=1073741900, seqno=1421 2024-11-23T08:22:50,373 WARN [DataXceiver for client DFSClient_attempt_1732350113698_0001_m_000000_0_1936578704_1 at /127.0.0.1:50580 [Receiving block BP-1830007029-172.17.0.2-1732350100447:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1512ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2/, blockId=1073741900, seqno=1421 2024-11-23T08:22:50,373 WARN [DataXceiver for client DFSClient_attempt_1732350113698_0001_m_000000_0_1936578704_1 at /127.0.0.1:49172 [Receiving block BP-1830007029-172.17.0.2-1732350100447:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1512ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6/, blockId=1073741900, seqno=1421 2024-11-23T08:22:50,495 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5c2409a8a8e1fdedb01950f8285b9431 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:22:50,495 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ef5793d8f5c1e43ccf012959a4988eca changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:22:50,496 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region de46fac4fb8b590f1d6ffea30a7de582 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:22:52,124 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9611d4d63053ea5e9af2e619420f3eca, had cached 0 bytes from a total of 320414712 2024-11-23T08:22:52,202 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ebeda0ea67c69d7eab974f2255a55d4f, had cached 0 bytes from a total of 320414712 2024-11-23T08:22:57,536 WARN [DataXceiver for client DFSClient_attempt_1732350113698_0001_m_000000_0_1936578704_1 at /127.0.0.1:50580 [Receiving block BP-1830007029-172.17.0.2-1732350100447:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1666ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2/, blockId=1073741900, seqno=1839 2024-11-23T08:22:57,536 WARN [DataXceiver for client DFSClient_attempt_1732350113698_0001_m_000000_0_1936578704_1 at /127.0.0.1:49172 [Receiving block BP-1830007029-172.17.0.2-1732350100447:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1666ms (threshold=300ms), 
volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6/, blockId=1073741900, seqno=1839 2024-11-23T08:22:57,536 WARN [DataXceiver for client DFSClient_attempt_1732350113698_0001_m_000000_0_1936578704_1 at /127.0.0.1:39396 [Receiving block BP-1830007029-172.17.0.2-1732350100447:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1666ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4/, blockId=1073741900, seqno=1839 2024-11-23T08:23:00,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741900_1076 (size=134217728) 2024-11-23T08:23:00,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741900_1076 (size=134217728) 2024-11-23T08:23:00,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741900_1076 (size=134217728) 2024-11-23T08:23:14,706 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T08:23:26,236 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ef5793d8f5c1e43ccf012959a4988eca, had cached 0 bytes from a total of 5634 2024-11-23T08:23:26,285 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region de46fac4fb8b590f1d6ffea30a7de582, had cached 0 bytes from a total of 7984 2024-11-23T08:23:36,590 WARN [DataXceiver for client DFSClient_attempt_1732350113698_0001_m_000000_0_1936578704_1 at /127.0.0.1:52482 [Receiving block BP-1830007029-172.17.0.2-1732350100447:blk_1073741901_1077] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 4635ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1/, blockId=1073741901, seqno=4093 2024-11-23T08:23:36,590 WARN [DataXceiver for client DFSClient_attempt_1732350113698_0001_m_000000_0_1936578704_1 at /127.0.0.1:43432 [Receiving block BP-1830007029-172.17.0.2-1732350100447:blk_1073741901_1077] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 4635ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5/, blockId=1073741901, seqno=4093 2024-11-23T08:23:36,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741901_1077 (size=134217728) 2024-11-23T08:23:36,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741901_1077 (size=134217728) 2024-11-23T08:23:36,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37117 is added to blk_1073741901_1077 (size=134217728) 2024-11-23T08:23:37,124 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9611d4d63053ea5e9af2e619420f3eca, had cached 0 bytes from a total of 320414712 2024-11-23T08:23:37,202 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ebeda0ea67c69d7eab974f2255a55d4f, had cached 0 bytes from a total of 320414712 2024-11-23T08:23:44,706 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T08:23:49,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741902_1078 (size=51979256) 2024-11-23T08:23:49,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741902_1078 (size=51979256) 2024-11-23T08:23:49,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741902_1078 (size=51979256) 2024-11-23T08:23:49,126 WARN [regionserver/b33d9013c60a:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 2, running: 1 2024-11-23T08:23:49,147 WARN [regionserver/b33d9013c60a:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 1, running: 0 2024-11-23T08:23:49,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741903_1079 (size=17520) 2024-11-23T08:23:49,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741903_1079 (size=17520) 2024-11-23T08:23:49,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741903_1079 (size=17520) 2024-11-23T08:23:49,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741904_1080 (size=482) 2024-11-23T08:23:49,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741904_1080 (size=482) 2024-11-23T08:23:49,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741904_1080 (size=482) 2024-11-23T08:23:49,231 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_3/usercache/jenkins/appcache/application_1732350113698_0001/container_1732350113698_0001_01_000002/launch_container.sh] 2024-11-23T08:23:49,231 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_3/usercache/jenkins/appcache/application_1732350113698_0001/container_1732350113698_0001_01_000002/container_tokens] 2024-11-23T08:23:49,231 WARN [ContainersLauncher 
#0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_3/usercache/jenkins/appcache/application_1732350113698_0001/container_1732350113698_0001_01_000002/sysfs] 2024-11-23T08:23:49,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741905_1081 (size=17520) 2024-11-23T08:23:49,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741905_1081 (size=17520) 2024-11-23T08:23:49,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741905_1081 (size=17520) 2024-11-23T08:23:49,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741906_1082 (size=349756) 2024-11-23T08:23:49,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741906_1082 (size=349756) 2024-11-23T08:23:49,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741906_1082 (size=349756) 2024-11-23T08:23:51,313 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T08:23:51,314 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T08:23:51,321 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,321 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T08:23:51,321 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T08:23:51,321 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,322 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-11-23T08:23:51,322 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-11-23T08:23:51,322 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350128588/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350128588/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,323 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350128588/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-11-23T08:23:51,323 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350128588/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-11-23T08:23:51,336 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-23T08:23:51,343 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350231343"}]},"ts":"1732350231343"} 2024-11-23T08:23:51,345 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-11-23T08:23:51,345 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-11-23T08:23:51,347 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-11-23T08:23:51,351 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=9611d4d63053ea5e9af2e619420f3eca, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ebeda0ea67c69d7eab974f2255a55d4f, UNASSIGN}] 2024-11-23T08:23:51,352 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ebeda0ea67c69d7eab974f2255a55d4f, UNASSIGN 2024-11-23T08:23:51,352 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=9611d4d63053ea5e9af2e619420f3eca, UNASSIGN 2024-11-23T08:23:51,353 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=9611d4d63053ea5e9af2e619420f3eca, regionState=CLOSING, regionLocation=b33d9013c60a,45199,1732350106787 
2024-11-23T08:23:51,353 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=ebeda0ea67c69d7eab974f2255a55d4f, regionState=CLOSING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:23:51,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=9611d4d63053ea5e9af2e619420f3eca, UNASSIGN because future has completed 2024-11-23T08:23:51,356 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:23:51,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9611d4d63053ea5e9af2e619420f3eca, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:23:51,357 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ebeda0ea67c69d7eab974f2255a55d4f, UNASSIGN because future has completed 2024-11-23T08:23:51,357 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:23:51,358 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure ebeda0ea67c69d7eab974f2255a55d4f, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:23:51,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-23T08:23:51,509 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:23:51,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:23:51,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 9611d4d63053ea5e9af2e619420f3eca, disabling compactions & flushes 2024-11-23T08:23:51,510 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca. 2024-11-23T08:23:51,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca. 2024-11-23T08:23:51,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca. 
after waiting 0 ms 2024-11-23T08:23:51,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca. 2024-11-23T08:23:51,515 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-11-23T08:23:51,516 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:23:51,516 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca. 2024-11-23T08:23:51,517 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 9611d4d63053ea5e9af2e619420f3eca: Waiting for close lock at 1732350231510Running coprocessor pre-close hooks at 1732350231510Disabling compacts and flushes for region at 1732350231510Disabling writes for close at 1732350231510Writing region close event to WAL at 1732350231511 (+1 ms)Running coprocessor post-close hooks at 1732350231516 (+5 ms)Closed at 1732350231516 2024-11-23T08:23:51,519 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:23:51,519 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:23:51,520 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:23:51,520 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing ebeda0ea67c69d7eab974f2255a55d4f, disabling compactions & flushes 2024-11-23T08:23:51,520 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f. 2024-11-23T08:23:51,520 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f. 2024-11-23T08:23:51,520 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f. after waiting 0 ms 2024-11-23T08:23:51,520 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f. 
2024-11-23T08:23:51,521 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=9611d4d63053ea5e9af2e619420f3eca, regionState=CLOSED 2024-11-23T08:23:51,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9611d4d63053ea5e9af2e619420f3eca, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:23:51,527 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-11-23T08:23:51,528 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:23:51,529 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f. 2024-11-23T08:23:51,529 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-11-23T08:23:51,529 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure 9611d4d63053ea5e9af2e619420f3eca, server=b33d9013c60a,45199,1732350106787 in 170 msec 2024-11-23T08:23:51,529 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for ebeda0ea67c69d7eab974f2255a55d4f: Waiting for close lock at 1732350231520Running coprocessor pre-close hooks at 1732350231520Disabling compacts and flushes for region at 1732350231520Disabling writes for close at 1732350231520Writing region close event to WAL at 1732350231521 (+1 ms)Running coprocessor post-close hooks at 1732350231528 (+7 ms)Closed at 1732350231529 (+1 ms) 2024-11-23T08:23:51,531 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=9611d4d63053ea5e9af2e619420f3eca, UNASSIGN in 178 msec 2024-11-23T08:23:51,531 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:23:51,532 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=ebeda0ea67c69d7eab974f2255a55d4f, regionState=CLOSED 2024-11-23T08:23:51,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure ebeda0ea67c69d7eab974f2255a55d4f, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:23:51,537 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=34 2024-11-23T08:23:51,537 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure ebeda0ea67c69d7eab974f2255a55d4f, 
server=b33d9013c60a,45199,1732350106787 in 178 msec 2024-11-23T08:23:51,540 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=32 2024-11-23T08:23:51,540 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=ebeda0ea67c69d7eab974f2255a55d4f, UNASSIGN in 186 msec 2024-11-23T08:23:51,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-11-23T08:23:51,544 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 194 msec 2024-11-23T08:23:51,545 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350231545"}]},"ts":"1732350231545"} 2024-11-23T08:23:51,547 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-11-23T08:23:51,547 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-11-23T08:23:51,550 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 211 msec 2024-11-23T08:23:51,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-23T08:23:51,658 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T08:23:51,661 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,666 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,667 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,671 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,677 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:23:51,677 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:23:51,677 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:23:51,681 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca/recovered.edits] 2024-11-23T08:23:51,681 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/recovered.edits] 2024-11-23T08:23:51,681 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f/recovered.edits] 2024-11-23T08:23:51,689 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:23:51,689 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_.d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:23:51,689 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_ to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/cf/53022b966121430ab7e1382c65d8e3c2_SeqId_4_ 2024-11-23T08:23:51,692 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca/recovered.edits/10.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca/recovered.edits/10.seqid 2024-11-23T08:23:51,692 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f/recovered.edits/10.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f/recovered.edits/10.seqid 2024-11-23T08:23:51,692 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/recovered.edits/6.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b/recovered.edits/6.seqid 2024-11-23T08:23:51,692 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/9611d4d63053ea5e9af2e619420f3eca 2024-11-23T08:23:51,693 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/ebeda0ea67c69d7eab974f2255a55d4f 2024-11-23T08:23:51,693 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportFileSystemStateWithSplitRegion/d313bbbd59f37fba635a18a29ad0bf4b 2024-11-23T08:23:51,693 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-11-23T08:23:51,696 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,700 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43417 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-23T08:23:51,705 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-11-23T08:23:51,709 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-11-23T08:23:51,711 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,711 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 2024-11-23T08:23:51,711 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350231711"}]},"ts":"9223372036854775807"} 2024-11-23T08:23:51,711 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350231711"}]},"ts":"9223372036854775807"} 2024-11-23T08:23:51,712 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350231711"}]},"ts":"9223372036854775807"} 2024-11-23T08:23:51,715 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-11-23T08:23:51,715 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d313bbbd59f37fba635a18a29ad0bf4b, NAME => 'testExportFileSystemStateWithSplitRegion,,1732350118402.d313bbbd59f37fba635a18a29ad0bf4b.', STARTKEY => '', ENDKEY => ''}, {ENCODED => 9611d4d63053ea5e9af2e619420f3eca, NAME => 'testExportFileSystemStateWithSplitRegion,,1732350126069.9611d4d63053ea5e9af2e619420f3eca.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => ebeda0ea67c69d7eab974f2255a55d4f, NAME => 'testExportFileSystemStateWithSplitRegion,5,1732350126069.ebeda0ea67c69d7eab974f2255a55d4f.', STARTKEY => '5', ENDKEY => ''}] 2024-11-23T08:23:51,715 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 
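[editor's note] The HFileArchiver entries above show DeleteTableProcedure moving each region directory from data/default/<table>/ to archive/data/default/<table>/ before the meta rows are removed. A small sketch, assuming the rootdir printed in this run's log lines, of checking that layout with the Hadoop FileSystem API (class name and expectations are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveLayoutCheck {
      public static void main(String[] args) throws Exception {
        // Rootdir and namenode address are the ones printed above; substitute your own hbase.rootdir.
        Path root = new Path("hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8");
        String table = "testExportFileSystemStateWithSplitRegion";
        FileSystem fs = FileSystem.get(root.toUri(), new Configuration());
        Path data = new Path(root, "data/default/" + table);
        Path archive = new Path(root, "archive/data/default/" + table);
        // After the DELETE finishes, the data dir should be gone and the regions archived.
        System.out.println("data dir still exists: " + fs.exists(data));
        if (fs.exists(archive)) {
          for (FileStatus region : fs.listStatus(archive)) {
            System.out.println("archived region dir: " + region.getPath().getName());
          }
        }
      }
    }
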
2024-11-23T08:23:51,715 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732350231715"}]},"ts":"9223372036854775807"} 2024-11-23T08:23:51,718 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-11-23T08:23:51,719 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,720 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 58 msec 2024-11-23T08:23:51,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:23:51,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:23:51,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:23:51,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:23:51,742 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-11-23T08:23:51,742 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-11-23T08:23:51,742 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testExportFileSystemStateWithSplitRegion with data null 2024-11-23T08:23:51,743 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-23T08:23:51,742 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-23T08:23:51,743 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-23T08:23:51,743 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-11-23T08:23:51,743 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-23T08:23:51,744 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:23:51,744 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:23:51,744 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:23:51,744 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:23:51,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-23T08:23:51,745 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,745 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T08:23:51,745 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:51,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-23T08:23:51,749 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350231749"}]},"ts":"1732350231749"} 2024-11-23T08:23:51,751 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-11-23T08:23:51,751 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-11-23T08:23:51,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-11-23T08:23:51,754 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ef5793d8f5c1e43ccf012959a4988eca, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=de46fac4fb8b590f1d6ffea30a7de582, UNASSIGN}] 2024-11-23T08:23:51,755 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=de46fac4fb8b590f1d6ffea30a7de582, UNASSIGN 2024-11-23T08:23:51,755 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ef5793d8f5c1e43ccf012959a4988eca, UNASSIGN 2024-11-23T08:23:51,756 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=ef5793d8f5c1e43ccf012959a4988eca, regionState=CLOSING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:23:51,756 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=de46fac4fb8b590f1d6ffea30a7de582, regionState=CLOSING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:23:51,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ef5793d8f5c1e43ccf012959a4988eca, UNASSIGN because future has completed 2024-11-23T08:23:51,759 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:23:51,759 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure ef5793d8f5c1e43ccf012959a4988eca, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:23:51,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=de46fac4fb8b590f1d6ffea30a7de582, UNASSIGN because future has completed 
2024-11-23T08:23:51,760 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:23:51,760 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure de46fac4fb8b590f1d6ffea30a7de582, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:23:51,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-23T08:23:51,912 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:23:51,913 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:23:51,913 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing ef5793d8f5c1e43ccf012959a4988eca, disabling compactions & flushes 2024-11-23T08:23:51,913 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:23:51,913 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:23:51,913 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. after waiting 0 ms 2024-11-23T08:23:51,913 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:23:51,913 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:23:51,914 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:23:51,914 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing de46fac4fb8b590f1d6ffea30a7de582, disabling compactions & flushes 2024-11-23T08:23:51,914 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 2024-11-23T08:23:51,914 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 
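[editor's note] The DISABLE of testtb-testExportFileSystemStateWithSplitRegion is driven by the async client (RawAsyncHBaseAdmin in the entries above), which resolves its future once the master reports the procedure done — the repeated "Checking to see if procedure is done pid=38" polling. A hedged sketch of the same call through AsyncAdmin; the class name is illustrative and the configuration assumptions are as in the earlier sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncDisableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          AsyncAdmin admin = conn.getAdmin();
          // The future completes when the master reports the DisableTableProcedure finished.
          admin.disableTable(table).get();
        }
      }
    }
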
2024-11-23T08:23:51,914 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. after waiting 0 ms 2024-11-23T08:23:51,914 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 2024-11-23T08:23:51,918 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:23:51,920 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:23:51,920 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca. 2024-11-23T08:23:51,920 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for ef5793d8f5c1e43ccf012959a4988eca: Waiting for close lock at 1732350231913Running coprocessor pre-close hooks at 1732350231913Disabling compacts and flushes for region at 1732350231913Disabling writes for close at 1732350231913Writing region close event to WAL at 1732350231914 (+1 ms)Running coprocessor post-close hooks at 1732350231920 (+6 ms)Closed at 1732350231920 2024-11-23T08:23:51,921 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:23:51,922 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:23:51,922 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582. 
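[editor's note] Each "Region close journal" entry (see the ones just above and below) concatenates phase names with epoch-millisecond stamps, e.g. "Waiting for close lock at 1732350231914 ... Closed at 1732350231922". A tiny, hypothetical helper for pulling those stamps out and computing the total close time; the sample string is an abridged copy of one journal from this log:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class CloseJournalDuration {
      // Matches the 13-digit epoch-millisecond stamps embedded in a close-journal message.
      private static final Pattern STAMP = Pattern.compile("at (\\d{13})");

      public static void main(String[] args) {
        String journal = "Waiting for close lock at 1732350231914"
            + "Running coprocessor pre-close hooks at 1732350231914"
            + "Writing region close event to WAL at 1732350231917 (+3 ms)"
            + "Running coprocessor post-close hooks at 1732350231922 (+5 ms)"
            + "Closed at 1732350231922";
        List<Long> stamps = new ArrayList<>();
        Matcher m = STAMP.matcher(journal);
        while (m.find()) {
          stamps.add(Long.parseLong(m.group(1)));
        }
        // Total time from "Waiting for close lock" to "Closed": 8 ms for this entry.
        long total = stamps.get(stamps.size() - 1) - stamps.get(0);
        System.out.println("region close took " + total + " ms");
      }
    }
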
2024-11-23T08:23:51,922 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:23:51,922 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for de46fac4fb8b590f1d6ffea30a7de582: Waiting for close lock at 1732350231914Running coprocessor pre-close hooks at 1732350231914Disabling compacts and flushes for region at 1732350231914Disabling writes for close at 1732350231914Writing region close event to WAL at 1732350231917 (+3 ms)Running coprocessor post-close hooks at 1732350231922 (+5 ms)Closed at 1732350231922 2024-11-23T08:23:51,923 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=ef5793d8f5c1e43ccf012959a4988eca, regionState=CLOSED 2024-11-23T08:23:51,925 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:23:51,925 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=de46fac4fb8b590f1d6ffea30a7de582, regionState=CLOSED 2024-11-23T08:23:51,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure ef5793d8f5c1e43ccf012959a4988eca, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:23:51,929 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure de46fac4fb8b590f1d6ffea30a7de582, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:23:51,930 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=40 2024-11-23T08:23:51,931 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure ef5793d8f5c1e43ccf012959a4988eca, server=b33d9013c60a,39649,1732350107049 in 168 msec 2024-11-23T08:23:51,932 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ef5793d8f5c1e43ccf012959a4988eca, UNASSIGN in 177 msec 2024-11-23T08:23:51,932 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=41 2024-11-23T08:23:51,932 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure de46fac4fb8b590f1d6ffea30a7de582, server=b33d9013c60a,45199,1732350106787 in 170 msec 2024-11-23T08:23:51,935 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=41, resume processing ppid=39 2024-11-23T08:23:51,935 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=de46fac4fb8b590f1d6ffea30a7de582, UNASSIGN in 178 msec 2024-11-23T08:23:51,938 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-11-23T08:23:51,938 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 183 msec 2024-11-23T08:23:51,940 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350231939"}]},"ts":"1732350231939"} 2024-11-23T08:23:51,942 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-11-23T08:23:51,943 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-11-23T08:23:51,946 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 199 msec 2024-11-23T08:23:52,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-23T08:23:52,068 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T08:23:52,068 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,070 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,071 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,074 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,076 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:23:52,076 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:23:52,078 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/recovered.edits] 2024-11-23T08:23:52,078 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/recovered.edits] 2024-11-23T08:23:52,082 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/cf/0dd830557eb545559d66630805390ffe to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/cf/0dd830557eb545559d66630805390ffe 2024-11-23T08:23:52,082 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/cf/965b067889b54ec6aa0133301e35c2b9 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/cf/965b067889b54ec6aa0133301e35c2b9 2024-11-23T08:23:52,085 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca/recovered.edits/9.seqid 2024-11-23T08:23:52,085 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582/recovered.edits/9.seqid 2024-11-23T08:23:52,086 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/ef5793d8f5c1e43ccf012959a4988eca 2024-11-23T08:23:52,086 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSplitRegion/de46fac4fb8b590f1d6ffea30a7de582 2024-11-23T08:23:52,086 
DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-11-23T08:23:52,088 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,091 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-11-23T08:23:52,094 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-11-23T08:23:52,095 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,095 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 2024-11-23T08:23:52,096 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350232095"}]},"ts":"9223372036854775807"} 2024-11-23T08:23:52,096 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350232095"}]},"ts":"9223372036854775807"} 2024-11-23T08:23:52,098 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T08:23:52,098 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ef5793d8f5c1e43ccf012959a4988eca, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732350115715.ef5793d8f5c1e43ccf012959a4988eca.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => de46fac4fb8b590f1d6ffea30a7de582, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732350115715.de46fac4fb8b590f1d6ffea30a7de582.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T08:23:52,098 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 
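[editor's note] The entries around here show the cleanup order the test relies on: the table is DISABLED first, then DeleteTableProcedure archives the regions and removes the meta rows, and finally the snapshots are dropped (the "Deleting snapshot" entries that follow). A hedged sketch of the same sequence from the Admin API; the class name is illustrative and only one of the three snapshots deleted below is shown:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TestCleanupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Same order as the log: the table must be DISABLED before deleteTable may run.
          if (admin.tableExists(table)) {
            if (!admin.isTableDisabled(table)) {
              admin.disableTable(table);
            }
            admin.deleteTable(table);
          }
          // Snapshot cleanup, mirroring the SnapshotManager "Deleting snapshot" entries below.
          admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSplitRegion");
        }
      }
    }
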
2024-11-23T08:23:52,099 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732350232099"}]},"ts":"9223372036854775807"} 2024-11-23T08:23:52,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,101 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T08:23:52,101 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T08:23:52,101 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T08:23:52,101 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T08:23:52,101 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-11-23T08:23:52,102 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,104 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 34 msec 2024-11-23T08:23:52,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:23:52,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:23:52,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:23:52,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:23:52,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-23T08:23:52,110 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,110 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T08:23:52,126 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-23T08:23:52,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,131 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-23T08:23:52,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,136 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-23T08:23:52,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:52,163 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=763 (was 719) Potentially 
hanging thread: RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:57466 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/b33d9013c60a:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:36955 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/b33d9013c60a:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/b33d9013c60a:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 108121) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1375 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1024052734_1 at /127.0.0.1:59422 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:59462 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36955 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:59006 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) 
java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=808 (was 782) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=538 (was 263) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=581 (was 6386) 2024-11-23T08:23:52,163 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=763 is superior to 500 2024-11-23T08:23:52,178 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=763, OpenFileDescriptor=808, MaxFileDescriptor=1048576, SystemLoadAverage=538, ProcessCount=19, AvailableMemoryMB=581 2024-11-23T08:23:52,178 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=763 is superior to 500 2024-11-23T08:23:52,180 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:23:52,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-11-23T08:23:52,182 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:23:52,182 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:23:52,182 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" 
qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-11-23T08:23:52,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-23T08:23:52,183 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:23:52,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741907_1083 (size=406) 2024-11-23T08:23:52,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741907_1083 (size=406) 2024-11-23T08:23:52,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741907_1083 (size=406) 2024-11-23T08:23:52,194 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => fb9af380c5696ad4024b373531b284ea, NAME => 'testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:23:52,194 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => c4282d3cd4c794133d9975c63338034d, NAME => 'testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:23:52,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741908_1084 (size=67) 2024-11-23T08:23:52,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741908_1084 (size=67) 2024-11-23T08:23:52,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741908_1084 (size=67) 2024-11-23T08:23:52,208 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:23:52,208 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing fb9af380c5696ad4024b373531b284ea, disabling compactions & flushes 2024-11-23T08:23:52,208 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 2024-11-23T08:23:52,208 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 2024-11-23T08:23:52,208 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. after waiting 0 ms 2024-11-23T08:23:52,208 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 2024-11-23T08:23:52,208 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 2024-11-23T08:23:52,208 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for fb9af380c5696ad4024b373531b284ea: Waiting for close lock at 1732350232208Disabling compacts and flushes for region at 1732350232208Disabling writes for close at 1732350232208Writing region close event to WAL at 1732350232208Closed at 1732350232208 2024-11-23T08:23:52,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741909_1085 (size=67) 2024-11-23T08:23:52,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741909_1085 (size=67) 2024-11-23T08:23:52,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741909_1085 (size=67) 2024-11-23T08:23:52,218 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:23:52,218 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing c4282d3cd4c794133d9975c63338034d, disabling compactions & flushes 2024-11-23T08:23:52,218 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 2024-11-23T08:23:52,218 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 2024-11-23T08:23:52,218 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 
after waiting 0 ms 2024-11-23T08:23:52,218 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 2024-11-23T08:23:52,218 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 2024-11-23T08:23:52,218 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for c4282d3cd4c794133d9975c63338034d: Waiting for close lock at 1732350232218Disabling compacts and flushes for region at 1732350232218Disabling writes for close at 1732350232218Writing region close event to WAL at 1732350232218Closed at 1732350232218 2024-11-23T08:23:52,219 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:23:52,220 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732350232219"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350232219"}]},"ts":"1732350232219"} 2024-11-23T08:23:52,220 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732350232219"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350232219"}]},"ts":"1732350232219"} 2024-11-23T08:23:52,222 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-23T08:23:52,223 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:23:52,223 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350232223"}]},"ts":"1732350232223"} 2024-11-23T08:23:52,225 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-11-23T08:23:52,226 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:23:52,227 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:23:52,227 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:23:52,227 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:23:52,227 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:23:52,227 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:23:52,227 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:23:52,227 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:23:52,227 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:23:52,227 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:23:52,227 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:23:52,228 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fb9af380c5696ad4024b373531b284ea, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c4282d3cd4c794133d9975c63338034d, ASSIGN}] 2024-11-23T08:23:52,229 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c4282d3cd4c794133d9975c63338034d, ASSIGN 2024-11-23T08:23:52,229 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fb9af380c5696ad4024b373531b284ea, ASSIGN 2024-11-23T08:23:52,230 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c4282d3cd4c794133d9975c63338034d, ASSIGN; state=OFFLINE, location=b33d9013c60a,43417,1732350106962; forceNewPlan=false, retain=false 2024-11-23T08:23:52,230 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fb9af380c5696ad4024b373531b284ea, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 2024-11-23T08:23:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-23T08:23:52,381 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-23T08:23:52,381 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=c4282d3cd4c794133d9975c63338034d, regionState=OPENING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:23:52,381 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=fb9af380c5696ad4024b373531b284ea, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:23:52,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c4282d3cd4c794133d9975c63338034d, ASSIGN because future has completed 2024-11-23T08:23:52,384 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure c4282d3cd4c794133d9975c63338034d, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:23:52,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fb9af380c5696ad4024b373531b284ea, ASSIGN because future has completed 2024-11-23T08:23:52,385 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure fb9af380c5696ad4024b373531b284ea, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:23:52,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-23T08:23:52,538 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34061, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T08:23:52,540 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 2024-11-23T08:23:52,541 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 
2024-11-23T08:23:52,541 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => fb9af380c5696ad4024b373531b284ea, NAME => 'testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T08:23:52,541 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => c4282d3cd4c794133d9975c63338034d, NAME => 'testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T08:23:52,541 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. service=AccessControlService 2024-11-23T08:23:52,541 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. service=AccessControlService 2024-11-23T08:23:52,541 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:23:52,541 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:23:52,542 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:52,542 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:52,542 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:23:52,542 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:23:52,542 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:52,542 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:52,542 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): 
checking classloading for fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:52,542 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:52,543 INFO [StoreOpener-c4282d3cd4c794133d9975c63338034d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:52,543 INFO [StoreOpener-fb9af380c5696ad4024b373531b284ea-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:52,545 INFO [StoreOpener-c4282d3cd4c794133d9975c63338034d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c4282d3cd4c794133d9975c63338034d columnFamilyName cf 2024-11-23T08:23:52,545 INFO [StoreOpener-fb9af380c5696ad4024b373531b284ea-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fb9af380c5696ad4024b373531b284ea columnFamilyName cf 2024-11-23T08:23:52,545 DEBUG [StoreOpener-fb9af380c5696ad4024b373531b284ea-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:23:52,545 DEBUG [StoreOpener-c4282d3cd4c794133d9975c63338034d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:23:52,545 INFO [StoreOpener-c4282d3cd4c794133d9975c63338034d-1 {}] regionserver.HStore(327): Store=c4282d3cd4c794133d9975c63338034d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:23:52,545 INFO [StoreOpener-fb9af380c5696ad4024b373531b284ea-1 {}] regionserver.HStore(327): Store=fb9af380c5696ad4024b373531b284ea/cf, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:23:52,546 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:52,546 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:52,546 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:52,546 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:52,547 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:52,547 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:52,547 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:52,547 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:52,547 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:52,547 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:52,549 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:52,549 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:52,551 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:23:52,552 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): 
Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:23:52,552 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened fb9af380c5696ad4024b373531b284ea; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72261499, jitterRate=0.07678024470806122}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:23:52,552 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:52,553 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for fb9af380c5696ad4024b373531b284ea: Running coprocessor pre-open hook at 1732350232542Writing region info on filesystem at 1732350232542Initializing all the Stores at 1732350232543 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350232543Cleaning up temporary data from old regions at 1732350232547 (+4 ms)Running coprocessor post-open hooks at 1732350232552 (+5 ms)Region opened successfully at 1732350232553 (+1 ms) 2024-11-23T08:23:52,554 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea., pid=49, masterSystemTime=1732350232537 2024-11-23T08:23:52,554 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened c4282d3cd4c794133d9975c63338034d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74428761, jitterRate=0.10907496511936188}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:23:52,554 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:52,554 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for c4282d3cd4c794133d9975c63338034d: Running coprocessor pre-open hook at 1732350232542Writing region info on filesystem at 1732350232542Initializing all the Stores at 1732350232543 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350232543Cleaning up temporary data from old regions at 1732350232547 (+4 ms)Running coprocessor post-open hooks at 1732350232554 (+7 
ms)Region opened successfully at 1732350232554 2024-11-23T08:23:52,555 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d., pid=48, masterSystemTime=1732350232536 2024-11-23T08:23:52,556 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 2024-11-23T08:23:52,556 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 2024-11-23T08:23:52,557 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=fb9af380c5696ad4024b373531b284ea, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:23:52,557 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 2024-11-23T08:23:52,557 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 2024-11-23T08:23:52,557 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=c4282d3cd4c794133d9975c63338034d, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:23:52,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure fb9af380c5696ad4024b373531b284ea, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:23:52,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure c4282d3cd4c794133d9975c63338034d, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:23:52,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-11-23T08:23:52,562 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure fb9af380c5696ad4024b373531b284ea, server=b33d9013c60a,39649,1732350107049 in 174 msec 2024-11-23T08:23:52,563 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-11-23T08:23:52,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fb9af380c5696ad4024b373531b284ea, ASSIGN in 334 msec 2024-11-23T08:23:52,563 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure c4282d3cd4c794133d9975c63338034d, server=b33d9013c60a,43417,1732350106962 in 176 msec 2024-11-23T08:23:52,564 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=47, resume processing ppid=45 2024-11-23T08:23:52,565 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c4282d3cd4c794133d9975c63338034d, ASSIGN in 335 msec 2024-11-23T08:23:52,565 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:23:52,565 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350232565"}]},"ts":"1732350232565"} 2024-11-23T08:23:52,567 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-11-23T08:23:52,568 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:23:52,568 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-11-23T08:23:52,572 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-23T08:23:52,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:23:52,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:23:52,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:23:52,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:23:52,618 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-23T08:23:52,618 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-23T08:23:52,618 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-23T08:23:52,618 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-23T08:23:52,619 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 437 msec 2024-11-23T08:23:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-23T08:23:52,808 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T08:23:52,808 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-11-23T08:23:52,808 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:23:52,812 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-11-23T08:23:52,812 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:23:52,812 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithTargetName assigned. 2024-11-23T08:23:52,812 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T08:23:52,816 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-23T08:23:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350232816 (current time:1732350232816). 
2024-11-23T08:23:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:23:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-23T08:23:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:23:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27df8e1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:23:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:23:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:23:52,818 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:23:52,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:23:52,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:23:52,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f6190e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:23:52,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:23:52,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:23:52,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:23:52,820 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60682, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:23:52,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@157b2f5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:23:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:23:52,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:23:52,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:23:52,822 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38504, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:23:52,824 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:23:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:23:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:23:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:23:52,824 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T08:23:52,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@653f050f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:23:52,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:23:52,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:23:52,826 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:23:52,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:23:52,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:23:52,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@594bab36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:23:52,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:23:52,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:23:52,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:23:52,828 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60704, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:23:52,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76cb47d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:23:52,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:23:52,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:23:52,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:23:52,831 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38516, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T08:23:52,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:23:52,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:23:52,835 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44790, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:23:52,836 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:23:52,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:23:52,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:23:52,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:23:52,837 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:23:52,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-23T08:23:52,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:23:52,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-23T08:23:52,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-11-23T08:23:52,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-23T08:23:52,841 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:23:52,842 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:23:52,845 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:23:52,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741910_1086 (size=167) 2024-11-23T08:23:52,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741910_1086 (size=167) 2024-11-23T08:23:52,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741910_1086 (size=167) 2024-11-23T08:23:52,853 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:23:52,854 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fb9af380c5696ad4024b373531b284ea}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c4282d3cd4c794133d9975c63338034d}] 2024-11-23T08:23:52,855 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:52,855 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:52,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-23T08:23:53,007 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-11-23T08:23:53,007 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-11-23T08:23:53,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 2024-11-23T08:23:53,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 2024-11-23T08:23:53,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for c4282d3cd4c794133d9975c63338034d: 2024-11-23T08:23:53,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for fb9af380c5696ad4024b373531b284ea: 2024-11-23T08:23:53,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. for emptySnaptb0-testExportWithTargetName completed. 2024-11-23T08:23:53,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. for emptySnaptb0-testExportWithTargetName completed. 2024-11-23T08:23:53,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-23T08:23:53,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:23:53,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-23T08:23:53,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:23:53,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:23:53,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:23:53,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741911_1087 (size=70) 2024-11-23T08:23:53,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741911_1087 (size=70) 2024-11-23T08:23:53,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741911_1087 (size=70) 2024-11-23T08:23:53,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 2024-11-23T08:23:53,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-23T08:23:53,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-11-23T08:23:53,026 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:53,026 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:53,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741912_1088 (size=70) 2024-11-23T08:23:53,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741912_1088 (size=70) 2024-11-23T08:23:53,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741912_1088 (size=70) 2024-11-23T08:23:53,028 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 
2024-11-23T08:23:53,028 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-11-23T08:23:53,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-11-23T08:23:53,029 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:53,029 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:53,031 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c4282d3cd4c794133d9975c63338034d in 177 msec 2024-11-23T08:23:53,032 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=51, resume processing ppid=50 2024-11-23T08:23:53,032 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fb9af380c5696ad4024b373531b284ea in 177 msec 2024-11-23T08:23:53,032 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:23:53,033 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:23:53,034 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:23:53,034 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-11-23T08:23:53,035 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-11-23T08:23:53,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741913_1089 (size=549) 2024-11-23T08:23:53,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741913_1089 (size=549) 2024-11-23T08:23:53,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741913_1089 (size=549) 2024-11-23T08:23:53,051 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:23:53,072 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:23:53,073 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-11-23T08:23:53,075 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:23:53,075 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-11-23T08:23:53,077 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 237 msec 2024-11-23T08:23:53,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-23T08:23:53,158 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T08:23:53,170 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='0c2662ead3d2a8b9bd2e310289e70c757', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:23:53,175 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='57264d5dce01892447509d2e3e9e0e73', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:23:53,179 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39649 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. with WAL disabled. Data may be lost in the event of a crash. 
2024-11-23T08:23:53,179 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='17ea55b97e511cceba9a5788165950004', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:23:53,180 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='21889b7510365f55573054456fffe5931', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:23:53,180 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43417 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:23:53,181 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='3b7deaf0d3aeacffa6e2b7e3040d5e634', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:23:53,182 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='46f4114b274f4e325a4f69d2b4453cc91', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:23:53,182 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='61ddbd18d0438b0f3b2797c17acdf989a', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:23:53,183 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='5ef1248fd0a3491b3b341cf639cc46b87', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:23:53,185 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T08:23:53,185 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='0ee4df5897270a494cc5ae7967ab4a96', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:23:53,188 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-23T08:23:53,188 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 
2024-11-23T08:23:53,189 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:23:53,191 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T08:23:53,198 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T08:23:53,208 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T08:23:53,213 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-23T08:23:53,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350233214 (current time:1732350233214). 2024-11-23T08:23:53,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:23:53,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-23T08:23:53,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:23:53,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@195b95ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:23:53,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:23:53,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:23:53,216 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:23:53,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:23:53,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:23:53,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23b8b453, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-11-23T08:23:53,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:23:53,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:23:53,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:23:53,217 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60730, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:23:53,218 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60ea846f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:23:53,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:23:53,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:23:53,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:23:53,221 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38522, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:23:53,222 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 
2024-11-23T08:23:53,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:23:53,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:23:53,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:23:53,222 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:23:53,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f2e0ef6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:23:53,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:23:53,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:23:53,224 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:23:53,224 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:23:53,224 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:23:53,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@534e1088, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:23:53,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:23:53,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:23:53,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:23:53,226 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60748, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:23:53,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1463ebf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:23:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:23:53,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:23:53,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:23:53,233 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38536, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:23:53,236 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:23:53,236 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:23:53,237 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44800, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:23:53,239 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 
2024-11-23T08:23:53,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:23:53,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:23:53,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:23:53,239 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:23:53,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-23T08:23:53,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T08:23:53,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-23T08:23:53,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-11-23T08:23:53,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-23T08:23:53,253 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:23:53,255 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:23:53,260 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:23:53,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741914_1090 (size=162) 2024-11-23T08:23:53,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741914_1090 (size=162) 2024-11-23T08:23:53,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741914_1090 (size=162) 2024-11-23T08:23:53,275 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:23:53,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fb9af380c5696ad4024b373531b284ea}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c4282d3cd4c794133d9975c63338034d}] 2024-11-23T08:23:53,277 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:53,277 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:53,358 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-23T08:23:53,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-11-23T08:23:53,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-11-23T08:23:53,429 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 2024-11-23T08:23:53,429 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 2024-11-23T08:23:53,429 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing fb9af380c5696ad4024b373531b284ea 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-23T08:23:53,429 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing c4282d3cd4c794133d9975c63338034d 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-23T08:23:53,444 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/.tmp/cf/287073dd035f42a5b42734387b5313b9 is 71, key is 0beb24e9be2d11331ae9f5710395eb88/cf:q/1732350233179/Put/seqid=0 2024-11-23T08:23:53,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/.tmp/cf/f81bdb94495146668dac3798357f7f3a is 71, key is 1899a87148287574d06a7f4a30c438b0/cf:q/1732350233180/Put/seqid=0 2024-11-23T08:23:53,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741915_1091 (size=5354) 2024-11-23T08:23:53,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741915_1091 (size=5354) 2024-11-23T08:23:53,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741915_1091 (size=5354) 2024-11-23T08:23:53,452 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/.tmp/cf/287073dd035f42a5b42734387b5313b9 
2024-11-23T08:23:53,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741916_1092 (size=8258) 2024-11-23T08:23:53,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741916_1092 (size=8258) 2024-11-23T08:23:53,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741916_1092 (size=8258) 2024-11-23T08:23:53,457 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/.tmp/cf/f81bdb94495146668dac3798357f7f3a 2024-11-23T08:23:53,460 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/.tmp/cf/287073dd035f42a5b42734387b5313b9 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/cf/287073dd035f42a5b42734387b5313b9 2024-11-23T08:23:53,471 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/cf/287073dd035f42a5b42734387b5313b9, entries=4, sequenceid=6, filesize=5.2 K 2024-11-23T08:23:53,472 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for fb9af380c5696ad4024b373531b284ea in 43ms, sequenceid=6, compaction requested=false 2024-11-23T08:23:53,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-23T08:23:53,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for fb9af380c5696ad4024b373531b284ea: 2024-11-23T08:23:53,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. for snaptb0-testExportWithTargetName completed. 2024-11-23T08:23:53,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-23T08:23:53,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:23:53,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/cf/287073dd035f42a5b42734387b5313b9] hfiles 2024-11-23T08:23:53,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/cf/287073dd035f42a5b42734387b5313b9 for snapshot=snaptb0-testExportWithTargetName 2024-11-23T08:23:53,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/.tmp/cf/f81bdb94495146668dac3798357f7f3a as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/cf/f81bdb94495146668dac3798357f7f3a 2024-11-23T08:23:53,486 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/cf/f81bdb94495146668dac3798357f7f3a, entries=46, sequenceid=6, filesize=8.1 K 2024-11-23T08:23:53,487 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for c4282d3cd4c794133d9975c63338034d in 57ms, sequenceid=6, compaction requested=false 2024-11-23T08:23:53,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for c4282d3cd4c794133d9975c63338034d: 2024-11-23T08:23:53,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. for snaptb0-testExportWithTargetName completed. 2024-11-23T08:23:53,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-23T08:23:53,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:23:53,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/cf/f81bdb94495146668dac3798357f7f3a] hfiles 2024-11-23T08:23:53,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/cf/f81bdb94495146668dac3798357f7f3a for snapshot=snaptb0-testExportWithTargetName 2024-11-23T08:23:53,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741917_1093 (size=109) 2024-11-23T08:23:53,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741917_1093 (size=109) 2024-11-23T08:23:53,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741917_1093 (size=109) 2024-11-23T08:23:53,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 
2024-11-23T08:23:53,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-23T08:23:53,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-11-23T08:23:53,490 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:53,491 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fb9af380c5696ad4024b373531b284ea 2024-11-23T08:23:53,498 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fb9af380c5696ad4024b373531b284ea in 219 msec 2024-11-23T08:23:53,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741918_1094 (size=109) 2024-11-23T08:23:53,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741918_1094 (size=109) 2024-11-23T08:23:53,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741918_1094 (size=109) 2024-11-23T08:23:53,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 
2024-11-23T08:23:53,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-11-23T08:23:53,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-11-23T08:23:53,511 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:53,511 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c4282d3cd4c794133d9975c63338034d 2024-11-23T08:23:53,514 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-11-23T08:23:53,514 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:23:53,514 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c4282d3cd4c794133d9975c63338034d in 236 msec 2024-11-23T08:23:53,515 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:23:53,515 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:23:53,516 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-11-23T08:23:53,516 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-11-23T08:23:53,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741919_1095 (size=627) 2024-11-23T08:23:53,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741919_1095 (size=627) 2024-11-23T08:23:53,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741919_1095 (size=627) 2024-11-23T08:23:53,530 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:23:53,536 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:23:53,536 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-23T08:23:53,538 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:23:53,538 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-11-23T08:23:53,539 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 291 msec 2024-11-23T08:23:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-23T08:23:53,568 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T08:23:53,568 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350233568 2024-11-23T08:23:53,568 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34105, tgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350233568, rawTgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350233568, srcFsUri=hdfs://localhost:34105, srcDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:23:53,598 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34105, inputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:23:53,598 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350233568, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350233568/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-23T08:23:53,600 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status 
and integrity. 2024-11-23T08:23:53,604 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350233568/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-23T08:23:53,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741920_1096 (size=627) 2024-11-23T08:23:53,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741920_1096 (size=627) 2024-11-23T08:23:53,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741921_1097 (size=162) 2024-11-23T08:23:53,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741920_1096 (size=627) 2024-11-23T08:23:53,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741921_1097 (size=162) 2024-11-23T08:23:53,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741921_1097 (size=162) 2024-11-23T08:23:53,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741922_1098 (size=154) 2024-11-23T08:23:53,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741922_1098 (size=154) 2024-11-23T08:23:53,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741922_1098 (size=154) 2024-11-23T08:23:53,633 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:23:53,633 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:23:53,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:23:54,640 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-12260584311064603615.jar 2024-11-23T08:23:54,641 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:23:54,641 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:23:54,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-15295496850827524312.jar 2024-11-23T08:23:54,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:23:54,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:23:54,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:23:54,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:23:54,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:23:54,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:23:54,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T08:23:54,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T08:23:54,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T08:23:54,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 
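The "Operation: SNAPSHOT ... completed" entry above (pid=53) is the server side of a client snapshot request against testtb-testExportWithTargetName. As a minimal sketch only, not the test's own code, such a request can be issued through the HBase client Admin API; the connection setup below and the use of a standalone main method are illustrative assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master's SnapshotProcedure reports completion,
          // which is what the "Checking to see if procedure is done pid=53" polling reflects.
          admin.snapshot("snaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"));
        }
      }
    }
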
2024-11-23T08:23:54,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T08:23:54,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T08:23:54,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T08:23:54,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T08:23:54,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T08:23:54,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T08:23:54,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T08:23:54,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:23:54,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:23:54,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:23:54,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:23:54,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-23T08:23:54,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:23:54,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:23:54,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741923_1099 (size=131440) 2024-11-23T08:23:54,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741923_1099 (size=131440) 2024-11-23T08:23:54,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741923_1099 (size=131440) 2024-11-23T08:23:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741924_1100 (size=6424742) 2024-11-23T08:23:54,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741924_1100 (size=6424742) 2024-11-23T08:23:54,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741924_1100 (size=6424742) 2024-11-23T08:23:54,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741925_1101 (size=4188619) 2024-11-23T08:23:54,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741925_1101 (size=4188619) 2024-11-23T08:23:54,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741925_1101 (size=4188619) 2024-11-23T08:23:54,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741926_1102 (size=1323991) 2024-11-23T08:23:54,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741926_1102 (size=1323991) 2024-11-23T08:23:54,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741926_1102 (size=1323991) 2024-11-23T08:23:54,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741927_1103 (size=903733) 2024-11-23T08:23:54,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741927_1103 (size=903733) 2024-11-23T08:23:54,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741927_1103 (size=903733) 2024-11-23T08:23:54,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741928_1104 (size=8360083) 
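The long run of "For class X, using jar Y" DEBUG entries above is emitted while HBase resolves the jar that contains each dependency class and ships it with the export MapReduce job. A minimal sketch of that wiring, assuming an illustrative job name and default configuration rather than the test's actual setup:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch");
        // Locates the jar for each required class (HBase modules, shaded protobuf/netty/gson,
        // ZooKeeper, metrics, OpenTelemetry, Hadoop I/O classes, ...) and adds it to the
        // job's distributed-cache classpath; each resolution is logged as seen above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }
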
2024-11-23T08:23:54,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741928_1104 (size=8360083) 2024-11-23T08:23:54,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741928_1104 (size=8360083) 2024-11-23T08:23:54,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741929_1105 (size=1877034) 2024-11-23T08:23:54,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741929_1105 (size=1877034) 2024-11-23T08:23:54,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741929_1105 (size=1877034) 2024-11-23T08:23:54,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741930_1106 (size=77835) 2024-11-23T08:23:54,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741930_1106 (size=77835) 2024-11-23T08:23:54,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741930_1106 (size=77835) 2024-11-23T08:23:54,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741931_1107 (size=30949) 2024-11-23T08:23:54,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741931_1107 (size=30949) 2024-11-23T08:23:54,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741931_1107 (size=30949) 2024-11-23T08:23:54,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741932_1108 (size=1597347) 2024-11-23T08:23:54,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741932_1108 (size=1597347) 2024-11-23T08:23:54,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741932_1108 (size=1597347) 2024-11-23T08:23:54,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741933_1109 (size=4695811) 2024-11-23T08:23:54,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741933_1109 (size=4695811) 2024-11-23T08:23:54,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741933_1109 (size=4695811) 2024-11-23T08:23:55,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741934_1110 (size=232957) 2024-11-23T08:23:55,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741934_1110 (size=232957) 2024-11-23T08:23:55,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741934_1110 
(size=232957) 2024-11-23T08:23:55,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741935_1111 (size=127628) 2024-11-23T08:23:55,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741935_1111 (size=127628) 2024-11-23T08:23:55,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741935_1111 (size=127628) 2024-11-23T08:23:55,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741936_1112 (size=20406) 2024-11-23T08:23:55,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741936_1112 (size=20406) 2024-11-23T08:23:55,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741936_1112 (size=20406) 2024-11-23T08:23:55,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741937_1113 (size=5175431) 2024-11-23T08:23:55,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741937_1113 (size=5175431) 2024-11-23T08:23:55,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741937_1113 (size=5175431) 2024-11-23T08:23:55,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741938_1114 (size=217634) 2024-11-23T08:23:55,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741938_1114 (size=217634) 2024-11-23T08:23:55,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741938_1114 (size=217634) 2024-11-23T08:23:55,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741939_1115 (size=1832290) 2024-11-23T08:23:55,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741939_1115 (size=1832290) 2024-11-23T08:23:55,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741939_1115 (size=1832290) 2024-11-23T08:23:55,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741940_1116 (size=322274) 2024-11-23T08:23:55,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741940_1116 (size=322274) 2024-11-23T08:23:55,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741940_1116 (size=322274) 2024-11-23T08:23:55,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741941_1117 (size=503880) 2024-11-23T08:23:55,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to 
blk_1073741941_1117 (size=503880) 2024-11-23T08:23:55,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741941_1117 (size=503880) 2024-11-23T08:23:55,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741942_1118 (size=29229) 2024-11-23T08:23:55,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741942_1118 (size=29229) 2024-11-23T08:23:55,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741942_1118 (size=29229) 2024-11-23T08:23:55,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741943_1119 (size=24096) 2024-11-23T08:23:55,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741943_1119 (size=24096) 2024-11-23T08:23:55,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741943_1119 (size=24096) 2024-11-23T08:23:55,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741944_1120 (size=111872) 2024-11-23T08:23:55,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741944_1120 (size=111872) 2024-11-23T08:23:55,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741944_1120 (size=111872) 2024-11-23T08:23:55,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741945_1121 (size=440957) 2024-11-23T08:23:55,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741945_1121 (size=440957) 2024-11-23T08:23:55,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741945_1121 (size=440957) 2024-11-23T08:23:55,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741946_1122 (size=45609) 2024-11-23T08:23:55,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741946_1122 (size=45609) 2024-11-23T08:23:55,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741946_1122 (size=45609) 2024-11-23T08:23:55,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741947_1123 (size=136454) 2024-11-23T08:23:55,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741947_1123 (size=136454) 2024-11-23T08:23:55,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741947_1123 (size=136454) 2024-11-23T08:23:55,199 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. 
See Job or Job#setJar(String). 2024-11-23T08:23:55,202 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-11-23T08:23:55,204 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-11-23T08:23:55,205 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-11-23T08:23:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741948_1124 (size=445) 2024-11-23T08:23:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741948_1124 (size=445) 2024-11-23T08:23:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741948_1124 (size=445) 2024-11-23T08:23:55,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741949_1125 (size=21) 2024-11-23T08:23:55,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741949_1125 (size=21) 2024-11-23T08:23:55,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741949_1125 (size=21) 2024-11-23T08:23:55,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741950_1126 (size=304007) 2024-11-23T08:23:55,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741950_1126 (size=304007) 2024-11-23T08:23:55,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741950_1126 (size=304007) 2024-11-23T08:23:55,771 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T08:23:55,771 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T08:23:55,775 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0001_000001 (auth:SIMPLE) from 127.0.0.1:60960 2024-11-23T08:23:55,782 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_2/usercache/jenkins/appcache/application_1732350113698_0001/container_1732350113698_0001_01_000001/launch_container.sh] 2024-11-23T08:23:55,782 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_2/usercache/jenkins/appcache/application_1732350113698_0001/container_1732350113698_0001_01_000001/container_tokens] 2024-11-23T08:23:55,782 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_2/usercache/jenkins/appcache/application_1732350113698_0001/container_1732350113698_0001_01_000001/sysfs] 2024-11-23T08:23:56,220 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0002_000001 (auth:SIMPLE) from 127.0.0.1:37456 2024-11-23T08:23:56,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-23T08:23:56,247 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-11-23T08:23:56,249 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:56,249 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T08:23:57,073 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:24:02,361 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0002_000001 (auth:SIMPLE) from 127.0.0.1:34876 2024-11-23T08:24:02,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741951_1127 (size=349705) 2024-11-23T08:24:02,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741951_1127 (size=349705) 2024-11-23T08:24:02,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741951_1127 (size=349705) 2024-11-23T08:24:04,711 INFO [Socket 
Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0002_000001 (auth:SIMPLE) from 127.0.0.1:49076 2024-11-23T08:24:04,715 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0002_000001 (auth:SIMPLE) from 127.0.0.1:39360 2024-11-23T08:24:12,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741952_1128 (size=5354) 2024-11-23T08:24:12,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741952_1128 (size=5354) 2024-11-23T08:24:12,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741952_1128 (size=5354) 2024-11-23T08:24:12,910 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0002/container_1732350113698_0002_01_000003/launch_container.sh] 2024-11-23T08:24:12,910 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0002/container_1732350113698_0002_01_000003/container_tokens] 2024-11-23T08:24:12,910 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0002/container_1732350113698_0002_01_000003/sysfs] 2024-11-23T08:24:13,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741954_1130 (size=8258) 2024-11-23T08:24:13,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741954_1130 (size=8258) 2024-11-23T08:24:13,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741954_1130 (size=8258) 2024-11-23T08:24:14,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741953_1129 (size=22164) 2024-11-23T08:24:14,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741953_1129 (size=22164) 2024-11-23T08:24:14,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741953_1129 (size=22164) 2024-11-23T08:24:14,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741955_1131 (size=465) 2024-11-23T08:24:14,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741955_1131 (size=465) 2024-11-23T08:24:14,036 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741955_1131 (size=465) 2024-11-23T08:24:14,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741956_1132 (size=22164) 2024-11-23T08:24:14,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741956_1132 (size=22164) 2024-11-23T08:24:14,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741956_1132 (size=22164) 2024-11-23T08:24:14,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741957_1133 (size=349705) 2024-11-23T08:24:14,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741957_1133 (size=349705) 2024-11-23T08:24:14,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741957_1133 (size=349705) 2024-11-23T08:24:14,102 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_1/usercache/jenkins/appcache/application_1732350113698_0002/container_1732350113698_0002_01_000002/launch_container.sh] 2024-11-23T08:24:14,102 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_1/usercache/jenkins/appcache/application_1732350113698_0002/container_1732350113698_0002_01_000002/container_tokens] 2024-11-23T08:24:14,102 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_1/usercache/jenkins/appcache/application_1732350113698_0002/container_1732350113698_0002_01_000002/sysfs] 2024-11-23T08:24:14,105 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0002_000001 (auth:SIMPLE) from 127.0.0.1:44514 2024-11-23T08:24:14,707 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T08:24:15,887 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T08:24:15,888 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
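At this point the export MapReduce job has finished, and ExportSnapshot finalizes and re-verifies the exported copy. As a hedged sketch, assuming illustrative destination path and flag usage as documented in the HBase reference guide rather than the test's exact arguments, the tool can be driven programmatically (or via the equivalent `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot ...` command line):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Copies the snapshot manifest and referenced HFiles to the destination filesystem,
        // renaming the exported snapshot via -target (the behavior exercised by
        // testExportWithTargetName). Paths below are illustrative assumptions.
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithTargetName",
            "-copy-to", "hdfs://localhost:34105/user/jenkins/export-test",
            "-target", "testExportWithTargetName"
        });
        System.exit(rc);
      }
    }
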
2024-11-23T08:24:15,895 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-11-23T08:24:15,895 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T08:24:15,896 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T08:24:15,896 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-23T08:24:15,896 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-11-23T08:24:15,897 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-11-23T08:24:15,897 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350233568/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350233568/.hbase-snapshot/testExportWithTargetName 2024-11-23T08:24:15,897 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350233568/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-11-23T08:24:15,897 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350233568/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-11-23T08:24:15,907 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-11-23T08:24:15,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-11-23T08:24:15,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-23T08:24:15,912 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350255912"}]},"ts":"1732350255912"} 2024-11-23T08:24:15,914 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-11-23T08:24:15,914 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-11-23T08:24:15,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-11-23T08:24:15,917 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fb9af380c5696ad4024b373531b284ea, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c4282d3cd4c794133d9975c63338034d, UNASSIGN}] 2024-11-23T08:24:15,918 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c4282d3cd4c794133d9975c63338034d, UNASSIGN 2024-11-23T08:24:15,919 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fb9af380c5696ad4024b373531b284ea, UNASSIGN 2024-11-23T08:24:15,920 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=c4282d3cd4c794133d9975c63338034d, regionState=CLOSING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:24:15,920 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=fb9af380c5696ad4024b373531b284ea, regionState=CLOSING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:24:15,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fb9af380c5696ad4024b373531b284ea, UNASSIGN because future has completed 2024-11-23T08:24:15,923 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:24:15,923 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure fb9af380c5696ad4024b373531b284ea, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:24:15,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c4282d3cd4c794133d9975c63338034d, UNASSIGN because future has completed 2024-11-23T08:24:15,924 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:24:15,924 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure c4282d3cd4c794133d9975c63338034d, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:24:16,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-23T08:24:16,075 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] 
handler.UnassignRegionHandler(122): Close fb9af380c5696ad4024b373531b284ea 2024-11-23T08:24:16,075 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:24:16,075 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing fb9af380c5696ad4024b373531b284ea, disabling compactions & flushes 2024-11-23T08:24:16,075 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 2024-11-23T08:24:16,075 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 2024-11-23T08:24:16,075 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. after waiting 0 ms 2024-11-23T08:24:16,075 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 2024-11-23T08:24:16,076 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close c4282d3cd4c794133d9975c63338034d 2024-11-23T08:24:16,076 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:24:16,076 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing c4282d3cd4c794133d9975c63338034d, disabling compactions & flushes 2024-11-23T08:24:16,076 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 2024-11-23T08:24:16,077 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 2024-11-23T08:24:16,077 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. after waiting 0 ms 2024-11-23T08:24:16,077 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 
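The DisableTableProcedure, CloseTableRegionsProcedure, and region close entries above are the master- and regionserver-side stages of a single client call. A minimal sketch of that call, with the connection boilerplate as an illustrative assumption:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithTargetName");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.isTableEnabled(table)) {
            // Drives the DisableTableProcedure: regions are unassigned and closed
            // (the CloseRegionProcedure / HRegion "Closing ..." lines), then the table
            // is marked state=DISABLED in hbase:meta.
            admin.disableTable(table);
          }
        }
      }
    }
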
2024-11-23T08:24:16,080 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:24:16,080 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:24:16,081 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:24:16,081 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:24:16,081 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea. 2024-11-23T08:24:16,081 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d. 2024-11-23T08:24:16,081 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for fb9af380c5696ad4024b373531b284ea: Waiting for close lock at 1732350256075Running coprocessor pre-close hooks at 1732350256075Disabling compacts and flushes for region at 1732350256075Disabling writes for close at 1732350256075Writing region close event to WAL at 1732350256076 (+1 ms)Running coprocessor post-close hooks at 1732350256081 (+5 ms)Closed at 1732350256081 2024-11-23T08:24:16,081 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for c4282d3cd4c794133d9975c63338034d: Waiting for close lock at 1732350256076Running coprocessor pre-close hooks at 1732350256076Disabling compacts and flushes for region at 1732350256076Disabling writes for close at 1732350256077 (+1 ms)Writing region close event to WAL at 1732350256077Running coprocessor post-close hooks at 1732350256081 (+4 ms)Closed at 1732350256081 2024-11-23T08:24:16,083 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed fb9af380c5696ad4024b373531b284ea 2024-11-23T08:24:16,084 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=fb9af380c5696ad4024b373531b284ea, regionState=CLOSED 2024-11-23T08:24:16,084 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed c4282d3cd4c794133d9975c63338034d 2024-11-23T08:24:16,085 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=c4282d3cd4c794133d9975c63338034d, regionState=CLOSED 2024-11-23T08:24:16,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going 
to wake up procedure pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure fb9af380c5696ad4024b373531b284ea, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:24:16,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure c4282d3cd4c794133d9975c63338034d, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:24:16,090 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-11-23T08:24:16,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-11-23T08:24:16,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure c4282d3cd4c794133d9975c63338034d, server=b33d9013c60a,43417,1732350106962 in 164 msec 2024-11-23T08:24:16,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure fb9af380c5696ad4024b373531b284ea, server=b33d9013c60a,39649,1732350107049 in 164 msec 2024-11-23T08:24:16,091 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fb9af380c5696ad4024b373531b284ea, UNASSIGN in 173 msec 2024-11-23T08:24:16,092 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=57 2024-11-23T08:24:16,092 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=c4282d3cd4c794133d9975c63338034d, UNASSIGN in 173 msec 2024-11-23T08:24:16,094 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-11-23T08:24:16,094 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 177 msec 2024-11-23T08:24:16,095 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350256095"}]},"ts":"1732350256095"} 2024-11-23T08:24:16,096 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-11-23T08:24:16,096 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-11-23T08:24:16,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 189 msec 2024-11-23T08:24:16,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-23T08:24:16,228 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T08:24:16,229 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 
delete testtb-testExportWithTargetName 2024-11-23T08:24:16,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T08:24:16,232 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T08:24:16,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-11-23T08:24:16,233 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T08:24:16,235 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-11-23T08:24:16,238 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea 2024-11-23T08:24:16,238 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d 2024-11-23T08:24:16,240 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/recovered.edits] 2024-11-23T08:24:16,240 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/recovered.edits] 2024-11-23T08:24:16,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-23T08:24:16,247 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/cf/f81bdb94495146668dac3798357f7f3a to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/cf/f81bdb94495146668dac3798357f7f3a 2024-11-23T08:24:16,248 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/cf/287073dd035f42a5b42734387b5313b9 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/cf/287073dd035f42a5b42734387b5313b9 2024-11-23T08:24:16,250 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea/recovered.edits/9.seqid 2024-11-23T08:24:16,250 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d/recovered.edits/9.seqid 2024-11-23T08:24:16,251 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/fb9af380c5696ad4024b373531b284ea 2024-11-23T08:24:16,251 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithTargetName/c4282d3cd4c794133d9975c63338034d 2024-11-23T08:24:16,251 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-11-23T08:24:16,253 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T08:24:16,256 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-11-23T08:24:16,258 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-11-23T08:24:16,259 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T08:24:16,259 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 
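The DeleteTableProcedure and HFileArchiver entries above show that store files are not removed outright on table deletion; they are moved under the cluster's archive directory before the table's regions and descriptor are cleaned from hbase:meta. A minimal sketch of the client-side cleanup calls, with names and connection setup as illustrative assumptions:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Table must already be disabled; store files end up under /archive as logged above.
          admin.deleteTable(TableName.valueOf("testtb-testExportWithTargetName"));
          // Snapshots are managed separately from the table and are deleted by name.
          admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
        }
      }
    }
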
2024-11-23T08:24:16,259 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350256259"}]},"ts":"9223372036854775807"} 2024-11-23T08:24:16,260 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350256259"}]},"ts":"9223372036854775807"} 2024-11-23T08:24:16,262 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T08:24:16,262 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => fb9af380c5696ad4024b373531b284ea, NAME => 'testtb-testExportWithTargetName,,1732350232180.fb9af380c5696ad4024b373531b284ea.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c4282d3cd4c794133d9975c63338034d, NAME => 'testtb-testExportWithTargetName,1,1732350232180.c4282d3cd4c794133d9975c63338034d.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T08:24:16,262 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 2024-11-23T08:24:16,262 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732350256262"}]},"ts":"9223372036854775807"} 2024-11-23T08:24:16,264 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-11-23T08:24:16,265 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T08:24:16,266 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 36 msec 2024-11-23T08:24:16,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T08:24:16,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T08:24:16,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T08:24:16,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T08:24:16,271 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-23T08:24:16,271 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache 
from testtb-testExportWithTargetName with data PBUF 2024-11-23T08:24:16,271 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-23T08:24:16,271 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-23T08:24:16,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T08:24:16,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T08:24:16,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T08:24:16,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T08:24:16,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:16,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:16,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:16,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:16,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-11-23T08:24:16,280 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-11-23T08:24:16,280 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T08:24:16,288 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-11-23T08:24:16,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-11-23T08:24:16,292 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-11-23T08:24:16,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-11-23T08:24:16,317 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=793 (was 763) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36695 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1310886873_1 at /127.0.0.1:35224 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:52542 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:36695 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:35252 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 113834) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45687 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:45278 [Waiting 
for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39609 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: DataXceiver for client DFSClient_NONMAPREDUCE_1310886873_1 at /127.0.0.1:52512 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36421 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:45687 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-2134 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) - Thread LEAK? 
-, OpenFileDescriptor=809 (was 808) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=638 (was 538) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 19), AvailableMemoryMB=2201 (was 581) - AvailableMemoryMB LEAK? - 2024-11-23T08:24:16,318 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=793 is superior to 500 2024-11-23T08:24:16,335 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=793, OpenFileDescriptor=809, MaxFileDescriptor=1048576, SystemLoadAverage=638, ProcessCount=19, AvailableMemoryMB=2201 2024-11-23T08:24:16,335 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=793 is superior to 500 2024-11-23T08:24:16,337 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:24:16,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T08:24:16,339 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:24:16,339 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:24:16,339 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-11-23T08:24:16,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-23T08:24:16,341 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:24:16,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741958_1134 (size=404) 2024-11-23T08:24:16,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741958_1134 (size=404) 2024-11-23T08:24:16,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741958_1134 (size=404) 2024-11-23T08:24:16,351 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3c265c0a26cf89d10d928836bf98f43f, NAME => 'testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', 
{TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:24:16,352 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 96b41dc10d05701322f44a234b88a415, NAME => 'testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:24:16,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741960_1136 (size=65) 2024-11-23T08:24:16,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741960_1136 (size=65) 2024-11-23T08:24:16,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741959_1135 (size=65) 2024-11-23T08:24:16,363 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:24:16,363 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 3c265c0a26cf89d10d928836bf98f43f, disabling compactions & flushes 2024-11-23T08:24:16,363 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 2024-11-23T08:24:16,363 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 2024-11-23T08:24:16,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741960_1136 (size=65) 2024-11-23T08:24:16,363 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. after waiting 0 ms 2024-11-23T08:24:16,363 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 
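Annotation: the CreateTableProcedure (pid=63) above was triggered by a create request whose column family attributes are echoed in the log (single version, no compression, 64 KB block size). A rough client-side equivalent using the TableDescriptorBuilder API is sketched below; the split key '1' is read off the two region boundaries shown in the log, everything else is only an illustrative assumption:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateExportTableSketch {
      static void create(Admin admin) throws IOException {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
                .setRegionReplication(1)                     // REGION_REPLICATION => '1'
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                    .setMaxVersions(1)                       // VERSIONS => '1'
                    .setBlocksize(65536)                     // BLOCKSIZE => '65536 B (64KB)'
                    .build());
        // one split key ('1') yields the two regions seen in the log: ['', '1') and ['1', '')
        admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
      }
    }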
2024-11-23T08:24:16,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741959_1135 (size=65) 2024-11-23T08:24:16,363 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 2024-11-23T08:24:16,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741959_1135 (size=65) 2024-11-23T08:24:16,364 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3c265c0a26cf89d10d928836bf98f43f: Waiting for close lock at 1732350256363Disabling compacts and flushes for region at 1732350256363Disabling writes for close at 1732350256363Writing region close event to WAL at 1732350256363Closed at 1732350256363 2024-11-23T08:24:16,364 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:24:16,364 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 96b41dc10d05701322f44a234b88a415, disabling compactions & flushes 2024-11-23T08:24:16,364 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 2024-11-23T08:24:16,364 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 2024-11-23T08:24:16,364 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. after waiting 0 ms 2024-11-23T08:24:16,364 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 2024-11-23T08:24:16,364 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 
2024-11-23T08:24:16,364 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 96b41dc10d05701322f44a234b88a415: Waiting for close lock at 1732350256364Disabling compacts and flushes for region at 1732350256364Disabling writes for close at 1732350256364Writing region close event to WAL at 1732350256364Closed at 1732350256364 2024-11-23T08:24:16,365 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:24:16,365 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732350256365"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350256365"}]},"ts":"1732350256365"} 2024-11-23T08:24:16,366 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732350256365"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350256365"}]},"ts":"1732350256365"} 2024-11-23T08:24:16,368 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T08:24:16,369 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:24:16,369 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350256369"}]},"ts":"1732350256369"} 2024-11-23T08:24:16,371 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-23T08:24:16,371 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:24:16,373 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:24:16,373 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:24:16,373 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:24:16,373 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:24:16,373 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:24:16,373 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:24:16,373 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:24:16,373 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:24:16,373 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:24:16,373 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:24:16,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3c265c0a26cf89d10d928836bf98f43f, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96b41dc10d05701322f44a234b88a415, ASSIGN}] 2024-11-23T08:24:16,375 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3c265c0a26cf89d10d928836bf98f43f, ASSIGN 2024-11-23T08:24:16,375 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96b41dc10d05701322f44a234b88a415, ASSIGN 2024-11-23T08:24:16,376 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96b41dc10d05701322f44a234b88a415, ASSIGN; state=OFFLINE, location=b33d9013c60a,43417,1732350106962; forceNewPlan=false, retain=false 2024-11-23T08:24:16,376 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3c265c0a26cf89d10d928836bf98f43f, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 2024-11-23T08:24:16,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-23T08:24:16,527 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
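Annotation: the repeated MasterRpcServices(1377) lines are the client polling whether procedure pid=63 is done while the two TransitRegionStateProcedures assign the new regions. From outside the master, a test would typically just wait on table availability; a sketch, with the polling interval and timeout being assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class WaitForTableSketch {
      // Polls until all regions of the table are assigned and open, or the deadline passes.
      static void waitUntilAvailable(Admin admin, String name, long timeoutMs) throws Exception {
        TableName tn = TableName.valueOf(name);
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!admin.isTableAvailable(tn)) {
          if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("Table " + name + " not available after " + timeoutMs + " ms");
          }
          Thread.sleep(100);
        }
      }
    }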
2024-11-23T08:24:16,528 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=96b41dc10d05701322f44a234b88a415, regionState=OPENING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:24:16,528 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=3c265c0a26cf89d10d928836bf98f43f, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:24:16,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96b41dc10d05701322f44a234b88a415, ASSIGN because future has completed 2024-11-23T08:24:16,535 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 96b41dc10d05701322f44a234b88a415, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:24:16,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3c265c0a26cf89d10d928836bf98f43f, ASSIGN because future has completed 2024-11-23T08:24:16,536 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c265c0a26cf89d10d928836bf98f43f, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:24:16,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-23T08:24:16,695 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 2024-11-23T08:24:16,695 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => 96b41dc10d05701322f44a234b88a415, NAME => 'testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T08:24:16,696 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. service=AccessControlService 2024-11-23T08:24:16,696 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:24:16,696 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 
2024-11-23T08:24:16,696 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:16,696 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => 3c265c0a26cf89d10d928836bf98f43f, NAME => 'testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T08:24:16,696 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:24:16,696 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:16,696 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:16,696 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. service=AccessControlService 2024-11-23T08:24:16,697 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
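Annotation: each region open registers the AccessControlService coprocessor endpoint, which is what makes this the secure variant of the export snapshot test. That coprocessor is normally wired in through configuration along the lines of the sketch below; the property names are the standard HBase security settings, while the exact values used by this particular test harness are an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class SecureConfSketch {
      static Configuration secureConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.security.authorization", true);
        // AccessController is loaded at master, regionserver and region scope
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        return conf;
      }
    }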
2024-11-23T08:24:16,697 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:16,697 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:24:16,697 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:16,697 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:16,698 INFO [StoreOpener-96b41dc10d05701322f44a234b88a415-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:16,698 INFO [StoreOpener-3c265c0a26cf89d10d928836bf98f43f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:16,700 INFO [StoreOpener-96b41dc10d05701322f44a234b88a415-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96b41dc10d05701322f44a234b88a415 columnFamilyName cf 2024-11-23T08:24:16,700 INFO [StoreOpener-3c265c0a26cf89d10d928836bf98f43f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c265c0a26cf89d10d928836bf98f43f columnFamilyName cf 2024-11-23T08:24:16,700 DEBUG [StoreOpener-3c265c0a26cf89d10d928836bf98f43f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:24:16,700 DEBUG [StoreOpener-96b41dc10d05701322f44a234b88a415-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:24:16,701 INFO [StoreOpener-3c265c0a26cf89d10d928836bf98f43f-1 {}] regionserver.HStore(327): Store=3c265c0a26cf89d10d928836bf98f43f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:24:16,701 INFO [StoreOpener-96b41dc10d05701322f44a234b88a415-1 {}] regionserver.HStore(327): Store=96b41dc10d05701322f44a234b88a415/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:24:16,701 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:16,701 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:16,701 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:16,702 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:16,702 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:16,702 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:16,702 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:16,703 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:16,703 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:16,703 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:16,704 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 
{event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:16,705 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:16,706 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:24:16,706 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:24:16,707 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened 96b41dc10d05701322f44a234b88a415; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69862834, jitterRate=0.04103735089302063}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:24:16,707 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened 3c265c0a26cf89d10d928836bf98f43f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63668262, jitterRate=-0.051268965005874634}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:24:16,707 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:16,707 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:16,707 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for 96b41dc10d05701322f44a234b88a415: Running coprocessor pre-open hook at 1732350256696Writing region info on filesystem at 1732350256696Initializing all the Stores at 1732350256697 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350256697Cleaning up temporary data from old regions at 1732350256703 (+6 ms)Running coprocessor post-open hooks at 1732350256707 (+4 ms)Region opened successfully at 1732350256707 2024-11-23T08:24:16,708 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for 3c265c0a26cf89d10d928836bf98f43f: Running coprocessor pre-open hook at 1732350256697Writing region info on filesystem at 1732350256697Initializing all the Stores at 
1732350256698 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350256698Cleaning up temporary data from old regions at 1732350256703 (+5 ms)Running coprocessor post-open hooks at 1732350256707 (+4 ms)Region opened successfully at 1732350256707 2024-11-23T08:24:16,708 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415., pid=66, masterSystemTime=1732350256687 2024-11-23T08:24:16,708 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f., pid=67, masterSystemTime=1732350256690 2024-11-23T08:24:16,710 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 2024-11-23T08:24:16,710 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 2024-11-23T08:24:16,710 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=3c265c0a26cf89d10d928836bf98f43f, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:24:16,711 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 2024-11-23T08:24:16,711 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 
2024-11-23T08:24:16,711 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=96b41dc10d05701322f44a234b88a415, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:24:16,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3c265c0a26cf89d10d928836bf98f43f, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:24:16,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 96b41dc10d05701322f44a234b88a415, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:24:16,715 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=64 2024-11-23T08:24:16,716 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure 3c265c0a26cf89d10d928836bf98f43f, server=b33d9013c60a,39649,1732350107049 in 177 msec 2024-11-23T08:24:16,716 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=65 2024-11-23T08:24:16,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3c265c0a26cf89d10d928836bf98f43f, ASSIGN in 342 msec 2024-11-23T08:24:16,716 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure 96b41dc10d05701322f44a234b88a415, server=b33d9013c60a,43417,1732350106962 in 180 msec 2024-11-23T08:24:16,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-11-23T08:24:16,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96b41dc10d05701322f44a234b88a415, ASSIGN in 343 msec 2024-11-23T08:24:16,719 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:24:16,719 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350256719"}]},"ts":"1732350256719"} 2024-11-23T08:24:16,721 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-23T08:24:16,722 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:24:16,722 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-11-23T08:24:16,725 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T08:24:16,779 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:16,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:16,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:16,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:16,788 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:16,788 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:16,788 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:16,788 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:16,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 451 msec 2024-11-23T08:24:16,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-23T08:24:16,969 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T08:24:16,969 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-11-23T08:24:16,970 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:24:16,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-23T08:24:16,978 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:24:16,978 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithResetTtl assigned. 
2024-11-23T08:24:16,979 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T08:24:16,982 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-23T08:24:16,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350256982 (current time:1732350256982). 2024-11-23T08:24:16,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:24:16,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-23T08:24:16,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:24:16,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57ce84ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:16,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:24:16,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:24:16,985 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:24:16,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:24:16,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:24:16,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37b4d956, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:16,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:24:16,986 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:24:16,986 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:16,987 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:38238, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:24:16,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3316433, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:16,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:24:16,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:24:16,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:24:16,989 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46690, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:24:16,990 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:24:16,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:24:16,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:16,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:16,990 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T08:24:16,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77e5520b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:16,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:24:16,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:24:16,992 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:24:16,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:24:16,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:24:16,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bfd46e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:16,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:24:16,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:24:16,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:16,993 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38256, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:24:16,994 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ecd4dab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:16,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:24:16,995 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:24:16,995 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:24:16,996 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46696, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T08:24:16,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:16,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:24:16,999 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51368, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:24:17,001 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:24:17,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:24:17,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:17,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:17,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T08:24:17,002 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:24:17,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:24:17,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-23T08:24:17,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-11-23T08:24:17,005 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:24:17,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-23T08:24:17,007 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:24:17,009 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:24:17,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741961_1137 (size=161) 2024-11-23T08:24:17,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741961_1137 (size=161) 2024-11-23T08:24:17,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741961_1137 (size=161) 2024-11-23T08:24:17,016 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:24:17,016 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3c265c0a26cf89d10d928836bf98f43f}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 96b41dc10d05701322f44a234b88a415}] 2024-11-23T08:24:17,017 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:17,017 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:17,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-23T08:24:17,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-11-23T08:24:17,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-11-23T08:24:17,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 2024-11-23T08:24:17,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for 3c265c0a26cf89d10d928836bf98f43f: 2024-11-23T08:24:17,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-23T08:24:17,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-23T08:24:17,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:24:17,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:24:17,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 
2024-11-23T08:24:17,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for 96b41dc10d05701322f44a234b88a415: 2024-11-23T08:24:17,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-23T08:24:17,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-23T08:24:17,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:24:17,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:24:17,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741962_1138 (size=68) 2024-11-23T08:24:17,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 2024-11-23T08:24:17,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-11-23T08:24:17,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741962_1138 (size=68) 2024-11-23T08:24:17,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741962_1138 (size=68) 2024-11-23T08:24:17,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-11-23T08:24:17,204 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:17,204 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:17,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741963_1139 (size=68) 2024-11-23T08:24:17,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741963_1139 (size=68) 2024-11-23T08:24:17,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741963_1139 (size=68) 2024-11-23T08:24:17,205 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on 
testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 2024-11-23T08:24:17,206 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-23T08:24:17,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-11-23T08:24:17,206 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:17,206 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:17,207 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3c265c0a26cf89d10d928836bf98f43f in 189 msec 2024-11-23T08:24:17,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=68 2024-11-23T08:24:17,208 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:24:17,209 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 96b41dc10d05701322f44a234b88a415 in 191 msec 2024-11-23T08:24:17,209 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:24:17,210 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:24:17,210 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-11-23T08:24:17,211 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-11-23T08:24:17,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741964_1140 (size=543) 2024-11-23T08:24:17,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741964_1140 (size=543) 2024-11-23T08:24:17,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741964_1140 (size=543) 2024-11-23T08:24:17,224 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, 
state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:24:17,229 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:24:17,229 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-11-23T08:24:17,230 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:24:17,230 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-11-23T08:24:17,231 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 227 msec 2024-11-23T08:24:17,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-23T08:24:17,329 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T08:24:17,341 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='0f8599b59f6351a950930c4aa129c0730', locateType=CURRENT is [region=testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:17,343 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='1e249fc5f9842bd172a3218922f61be7b', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:24:17,344 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='2f7ab2335873378294b33a6afa81763ff', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:24:17,345 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 
'testtb-testExportWithResetTtl', row='3518e26ae9f6e464c9b5c2991a6e5e320', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:24:17,345 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='480bd419fbf5d88cb52d5c8124dd18837', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:24:17,346 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='59636ec7328bd91fb472b7193e4e9a65a', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:24:17,348 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39649 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:24:17,351 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43417 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:24:17,353 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T08:24:17,355 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-23T08:24:17,355 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 2024-11-23T08:24:17,355 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:24:17,357 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T08:24:17,362 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T08:24:17,367 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T08:24:17,370 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-23T08:24:17,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350257370 (current time:1732350257370). 
2024-11-23T08:24:17,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:24:17,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-23T08:24:17,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:24:17,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fdabf3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:17,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:24:17,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:24:17,372 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:24:17,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:24:17,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:24:17,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7424c3bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:17,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:24:17,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:24:17,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:17,374 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38264, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:24:17,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e363a90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:17,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:24:17,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:24:17,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:24:17,378 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46706, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:24:17,379 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:24:17,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:24:17,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:17,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:17,379 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T08:24:17,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@753af94e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:17,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:24:17,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:24:17,381 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:24:17,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:24:17,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:24:17,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52524d6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:17,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:24:17,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:24:17,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:17,383 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38296, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:24:17,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3669d251, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:17,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:24:17,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:24:17,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:24:17,386 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46710, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T08:24:17,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2]
2024-11-23T08:24:17,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-23T08:24:17,389 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51372, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-23T08:24:17,390 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501.
2024-11-23T08:24:17,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack:
	at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
	at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
	at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
	at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
	at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
	at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
	at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
	at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
	at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
	at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
	at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
	at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-23T08:24:17,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T08:24:17,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T08:24:17,390 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-23T08:24:17,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA]
2024-11-23T08:24:17,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
2024-11-23T08:24:17,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }
2024-11-23T08:24:17,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71
2024-11-23T08:24:17,393 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-11-23T08:24:17,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71
2024-11-23T08:24:17,394 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-11-23T08:24:17,397 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-11-23T08:24:17,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741965_1141 (size=156)
2024-11-23T08:24:17,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741965_1141 (size=156)
2024-11-23T08:24:17,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741965_1141 (size=156)
2024-11-23T08:24:17,404 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:24:17,404 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3c265c0a26cf89d10d928836bf98f43f}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 96b41dc10d05701322f44a234b88a415}] 2024-11-23T08:24:17,405 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:17,405 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:17,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-23T08:24:17,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-11-23T08:24:17,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-11-23T08:24:17,557 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 2024-11-23T08:24:17,557 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 
2024-11-23T08:24:17,558 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing 3c265c0a26cf89d10d928836bf98f43f 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-23T08:24:17,558 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing 96b41dc10d05701322f44a234b88a415 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-23T08:24:17,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/.tmp/cf/ef786532291b432781ba814793d52467 is 71, key is 0e1b8860a804be06febea0251334c84f/cf:q/1732350257348/Put/seqid=0 2024-11-23T08:24:17,576 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/.tmp/cf/8c9dd8eda7a9411d900e3e045625b828 is 71, key is 22ca78b1ed300fb5bb1be70bd248769f/cf:q/1732350257351/Put/seqid=0 2024-11-23T08:24:17,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741967_1143 (size=8258) 2024-11-23T08:24:17,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741966_1142 (size=5354) 2024-11-23T08:24:17,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741967_1143 (size=8258) 2024-11-23T08:24:17,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741967_1143 (size=8258) 2024-11-23T08:24:17,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741966_1142 (size=5354) 2024-11-23T08:24:17,585 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/.tmp/cf/8c9dd8eda7a9411d900e3e045625b828 2024-11-23T08:24:17,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741966_1142 (size=5354) 2024-11-23T08:24:17,587 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/.tmp/cf/ef786532291b432781ba814793d52467 2024-11-23T08:24:17,591 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/.tmp/cf/8c9dd8eda7a9411d900e3e045625b828 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/cf/8c9dd8eda7a9411d900e3e045625b828 2024-11-23T08:24:17,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/.tmp/cf/ef786532291b432781ba814793d52467 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/cf/ef786532291b432781ba814793d52467 2024-11-23T08:24:17,597 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/cf/8c9dd8eda7a9411d900e3e045625b828, entries=46, sequenceid=6, filesize=8.1 K 2024-11-23T08:24:17,598 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/cf/ef786532291b432781ba814793d52467, entries=4, sequenceid=6, filesize=5.2 K 2024-11-23T08:24:17,598 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 96b41dc10d05701322f44a234b88a415 in 40ms, sequenceid=6, compaction requested=false 2024-11-23T08:24:17,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-23T08:24:17,599 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 3c265c0a26cf89d10d928836bf98f43f in 40ms, sequenceid=6, compaction requested=false 2024-11-23T08:24:17,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-23T08:24:17,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for 96b41dc10d05701322f44a234b88a415: 2024-11-23T08:24:17,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for 3c265c0a26cf89d10d928836bf98f43f: 2024-11-23T08:24:17,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. for snaptb0-testExportWithResetTtl completed. 2024-11-23T08:24:17,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. for snaptb0-testExportWithResetTtl completed. 2024-11-23T08:24:17,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-23T08:24:17,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:24:17,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-23T08:24:17,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/cf/8c9dd8eda7a9411d900e3e045625b828] hfiles 2024-11-23T08:24:17,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:24:17,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/cf/8c9dd8eda7a9411d900e3e045625b828 for snapshot=snaptb0-testExportWithResetTtl 2024-11-23T08:24:17,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/cf/ef786532291b432781ba814793d52467] hfiles 2024-11-23T08:24:17,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/cf/ef786532291b432781ba814793d52467 for snapshot=snaptb0-testExportWithResetTtl 2024-11-23T08:24:17,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741968_1144 (size=107) 2024-11-23T08:24:17,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741968_1144 (size=107) 2024-11-23T08:24:17,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is 
added to blk_1073741968_1144 (size=107) 2024-11-23T08:24:17,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 2024-11-23T08:24:17,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-23T08:24:17,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-11-23T08:24:17,614 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:17,614 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:17,616 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3c265c0a26cf89d10d928836bf98f43f in 211 msec 2024-11-23T08:24:17,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741969_1145 (size=107) 2024-11-23T08:24:17,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741969_1145 (size=107) 2024-11-23T08:24:17,620 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 
2024-11-23T08:24:17,621 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-11-23T08:24:17,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741969_1145 (size=107) 2024-11-23T08:24:17,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-11-23T08:24:17,621 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:17,621 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:17,624 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-11-23T08:24:17,624 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:24:17,624 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 96b41dc10d05701322f44a234b88a415 in 218 msec 2024-11-23T08:24:17,625 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:24:17,625 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:24:17,625 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-11-23T08:24:17,626 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-11-23T08:24:17,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741970_1146 (size=621) 2024-11-23T08:24:17,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741970_1146 (size=621) 2024-11-23T08:24:17,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741970_1146 (size=621) 2024-11-23T08:24:17,636 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl 
table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:24:17,643 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:24:17,643 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-11-23T08:24:17,644 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:24:17,644 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-11-23T08:24:17,646 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 253 msec 2024-11-23T08:24:17,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-23T08:24:17,708 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T08:24:17,709 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:24:17,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-11-23T08:24:17,711 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:24:17,711 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:24:17,711 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: 
"testExportWithResetTtl" procId is: 74 2024-11-23T08:24:17,712 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:24:17,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-23T08:24:17,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741971_1147 (size=397) 2024-11-23T08:24:17,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741971_1147 (size=397) 2024-11-23T08:24:17,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741971_1147 (size=397) 2024-11-23T08:24:17,728 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1618166822a6ed6bd3b421503eeaa04d, NAME => 'testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:24:17,729 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => c961ca5918d2528b30d9c5b9943fe618, NAME => 'testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:24:17,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741973_1149 (size=58) 2024-11-23T08:24:17,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741972_1148 (size=58) 2024-11-23T08:24:17,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741973_1149 (size=58) 2024-11-23T08:24:17,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741973_1149 (size=58) 2024-11-23T08:24:17,738 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d.; 
StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:24:17,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741972_1148 (size=58) 2024-11-23T08:24:17,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741972_1148 (size=58) 2024-11-23T08:24:17,739 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 1618166822a6ed6bd3b421503eeaa04d, disabling compactions & flushes 2024-11-23T08:24:17,739 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 2024-11-23T08:24:17,739 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 2024-11-23T08:24:17,739 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. after waiting 0 ms 2024-11-23T08:24:17,739 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 2024-11-23T08:24:17,739 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 2024-11-23T08:24:17,739 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1618166822a6ed6bd3b421503eeaa04d: Waiting for close lock at 1732350257738Disabling compacts and flushes for region at 1732350257738Disabling writes for close at 1732350257739 (+1 ms)Writing region close event to WAL at 1732350257739Closed at 1732350257739 2024-11-23T08:24:17,739 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:24:17,739 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing c961ca5918d2528b30d9c5b9943fe618, disabling compactions & flushes 2024-11-23T08:24:17,739 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 2024-11-23T08:24:17,739 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 2024-11-23T08:24:17,739 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. after waiting 0 ms 2024-11-23T08:24:17,739 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 
2024-11-23T08:24:17,739 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 2024-11-23T08:24:17,739 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for c961ca5918d2528b30d9c5b9943fe618: Waiting for close lock at 1732350257739Disabling compacts and flushes for region at 1732350257739Disabling writes for close at 1732350257739Writing region close event to WAL at 1732350257739Closed at 1732350257739 2024-11-23T08:24:17,740 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:24:17,741 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732350257741"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350257741"}]},"ts":"1732350257741"} 2024-11-23T08:24:17,741 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732350257741"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350257741"}]},"ts":"1732350257741"} 2024-11-23T08:24:17,744 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T08:24:17,745 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:24:17,745 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350257745"}]},"ts":"1732350257745"} 2024-11-23T08:24:17,747 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-23T08:24:17,748 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:24:17,749 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:24:17,750 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:24:17,750 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:24:17,750 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:24:17,750 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:24:17,750 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:24:17,750 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:24:17,750 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:24:17,750 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:24:17,750 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:24:17,750 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=1618166822a6ed6bd3b421503eeaa04d, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=c961ca5918d2528b30d9c5b9943fe618, ASSIGN}] 2024-11-23T08:24:17,751 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=1618166822a6ed6bd3b421503eeaa04d, ASSIGN 2024-11-23T08:24:17,751 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=c961ca5918d2528b30d9c5b9943fe618, ASSIGN 2024-11-23T08:24:17,752 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=c961ca5918d2528b30d9c5b9943fe618, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 2024-11-23T08:24:17,753 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=1618166822a6ed6bd3b421503eeaa04d, ASSIGN; state=OFFLINE, location=b33d9013c60a,45199,1732350106787; forceNewPlan=false, retain=false 2024-11-23T08:24:17,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-23T08:24:17,903 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T08:24:17,903 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=c961ca5918d2528b30d9c5b9943fe618, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:24:17,903 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=1618166822a6ed6bd3b421503eeaa04d, regionState=OPENING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:24:17,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=1618166822a6ed6bd3b421503eeaa04d, ASSIGN because future has completed 2024-11-23T08:24:17,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1618166822a6ed6bd3b421503eeaa04d, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:24:17,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=c961ca5918d2528b30d9c5b9943fe618, ASSIGN because future has completed 2024-11-23T08:24:17,907 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure c961ca5918d2528b30d9c5b9943fe618, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:24:18,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-23T08:24:18,065 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 2024-11-23T08:24:18,065 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => c961ca5918d2528b30d9c5b9943fe618, NAME => 'testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T08:24:18,066 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 2024-11-23T08:24:18,066 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => 1618166822a6ed6bd3b421503eeaa04d, NAME => 'testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T08:24:18,066 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. service=AccessControlService 2024-11-23T08:24:18,066 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 
service=AccessControlService 2024-11-23T08:24:18,066 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:24:18,066 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:24:18,067 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,067 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:18,067 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:24:18,067 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:24:18,067 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,067 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:18,067 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,067 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:18,068 INFO [StoreOpener-1618166822a6ed6bd3b421503eeaa04d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,069 INFO [StoreOpener-c961ca5918d2528b30d9c5b9943fe618-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:18,070 INFO [StoreOpener-1618166822a6ed6bd3b421503eeaa04d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1618166822a6ed6bd3b421503eeaa04d columnFamilyName cf 2024-11-23T08:24:18,070 INFO [StoreOpener-c961ca5918d2528b30d9c5b9943fe618-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c961ca5918d2528b30d9c5b9943fe618 columnFamilyName cf 2024-11-23T08:24:18,070 DEBUG [StoreOpener-1618166822a6ed6bd3b421503eeaa04d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:24:18,070 DEBUG [StoreOpener-c961ca5918d2528b30d9c5b9943fe618-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:24:18,071 INFO [StoreOpener-1618166822a6ed6bd3b421503eeaa04d-1 {}] regionserver.HStore(327): Store=1618166822a6ed6bd3b421503eeaa04d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:24:18,071 INFO [StoreOpener-c961ca5918d2528b30d9c5b9943fe618-1 {}] regionserver.HStore(327): Store=c961ca5918d2528b30d9c5b9943fe618/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:24:18,071 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,071 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:18,071 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:18,071 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,072 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, 
pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:18,072 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,072 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:18,072 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,072 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:18,072 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,073 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:18,073 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,075 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:24:18,075 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:24:18,076 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened c961ca5918d2528b30d9c5b9943fe618; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66291492, jitterRate=-0.012179791927337646}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:24:18,076 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened 1618166822a6ed6bd3b421503eeaa04d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73789607, jitterRate=0.09955082833766937}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:24:18,076 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c961ca5918d2528b30d9c5b9943fe618 
2024-11-23T08:24:18,076 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,076 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for c961ca5918d2528b30d9c5b9943fe618: Running coprocessor pre-open hook at 1732350258067Writing region info on filesystem at 1732350258067Initializing all the Stores at 1732350258068 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350258068Cleaning up temporary data from old regions at 1732350258072 (+4 ms)Running coprocessor post-open hooks at 1732350258076 (+4 ms)Region opened successfully at 1732350258076 2024-11-23T08:24:18,076 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for 1618166822a6ed6bd3b421503eeaa04d: Running coprocessor pre-open hook at 1732350258067Writing region info on filesystem at 1732350258067Initializing all the Stores at 1732350258068 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350258068Cleaning up temporary data from old regions at 1732350258072 (+4 ms)Running coprocessor post-open hooks at 1732350258076 (+4 ms)Region opened successfully at 1732350258076 2024-11-23T08:24:18,077 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618., pid=78, masterSystemTime=1732350258059 2024-11-23T08:24:18,077 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d., pid=77, masterSystemTime=1732350258057 2024-11-23T08:24:18,078 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 2024-11-23T08:24:18,078 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 2024-11-23T08:24:18,079 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=1618166822a6ed6bd3b421503eeaa04d, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:24:18,079 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 
2024-11-23T08:24:18,079 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 2024-11-23T08:24:18,080 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=c961ca5918d2528b30d9c5b9943fe618, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:24:18,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1618166822a6ed6bd3b421503eeaa04d, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:24:18,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure c961ca5918d2528b30d9c5b9943fe618, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:24:18,083 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=75 2024-11-23T08:24:18,083 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure 1618166822a6ed6bd3b421503eeaa04d, server=b33d9013c60a,45199,1732350106787 in 175 msec 2024-11-23T08:24:18,084 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=1618166822a6ed6bd3b421503eeaa04d, ASSIGN in 333 msec 2024-11-23T08:24:18,084 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=76 2024-11-23T08:24:18,084 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure c961ca5918d2528b30d9c5b9943fe618, server=b33d9013c60a,39649,1732350107049 in 176 msec 2024-11-23T08:24:18,086 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=76, resume processing ppid=74 2024-11-23T08:24:18,086 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=c961ca5918d2528b30d9c5b9943fe618, ASSIGN in 334 msec 2024-11-23T08:24:18,087 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:24:18,087 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350258087"}]},"ts":"1732350258087"} 2024-11-23T08:24:18,088 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-23T08:24:18,089 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:24:18,089 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-11-23T08:24:18,092 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T08:24:18,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:18,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:18,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:18,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:18,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:18,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:18,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:18,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:18,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:18,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:18,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:18,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:18,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished 
pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 459 msec 2024-11-23T08:24:18,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-23T08:24:18,337 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-11-23T08:24:18,337 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-11-23T08:24:18,338 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:24:18,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-23T08:24:18,341 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:24:18,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportWithResetTtl assigned. 2024-11-23T08:24:18,341 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T08:24:18,346 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='01b6da8d4879a102cbb874b4a3b230310', locateType=CURRENT is [region=testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:24:18,347 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='1188db8ad889fe8aca3f7cb9455dab119', locateType=CURRENT is [region=testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:18,348 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='2c3627ac99d546a9aa63bab4c985d3b82', locateType=CURRENT is [region=testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:18,349 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='348fdda6b036dfb55c0ee1ad2d76a9fd1', locateType=CURRENT is [region=testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:18,349 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='4e2ad3dc954c63eae6a99dfdbeab2a0ac', locateType=CURRENT is [region=testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:18,352 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44454, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:24:18,353 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HRegion(8528): writing data to 
region testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:24:18,355 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39649 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:24:18,357 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T08:24:18,359 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-23T08:24:18,359 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 2024-11-23T08:24:18,359 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:24:18,361 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T08:24:18,366 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T08:24:18,371 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T08:24:18,373 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-23T08:24:18,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350258373 (current time:1732350258373). 
2024-11-23T08:24:18,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-23T08:24:18,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:24:18,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23757e03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:18,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:24:18,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:24:18,374 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:24:18,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:24:18,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:24:18,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e6a1e06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:18,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:24:18,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:24:18,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:18,376 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38314, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:24:18,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@623e2232, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:18,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:24:18,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:24:18,378 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:24:18,379 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46714, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:24:18,391 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:24:18,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:24:18,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:18,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:18,391 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T08:24:18,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ca8a7d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:18,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:24:18,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:24:18,394 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:24:18,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:24:18,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:24:18,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2734163b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:18,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:24:18,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:24:18,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:18,395 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38336, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:24:18,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a250f9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:18,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:24:18,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:24:18,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:24:18,399 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46716, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T08:24:18,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:18,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:24:18,402 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51374, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:24:18,403 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:24:18,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:24:18,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:18,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:18,404 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:24:18,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T08:24:18,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:24:18,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-23T08:24:18,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-11-23T08:24:18,407 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:24:18,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-23T08:24:18,409 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:24:18,411 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:24:18,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741974_1150 (size=143) 2024-11-23T08:24:18,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741974_1150 (size=143) 2024-11-23T08:24:18,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741974_1150 (size=143) 2024-11-23T08:24:18,425 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
2024-11-23T08:24:18,425 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1618166822a6ed6bd3b421503eeaa04d}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c961ca5918d2528b30d9c5b9943fe618}] 2024-11-23T08:24:18,427 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,427 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:18,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-23T08:24:18,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45199 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-11-23T08:24:18,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-11-23T08:24:18,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 2024-11-23T08:24:18,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 
2024-11-23T08:24:18,578 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing 1618166822a6ed6bd3b421503eeaa04d 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-23T08:24:18,578 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing c961ca5918d2528b30d9c5b9943fe618 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-23T08:24:18,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/.tmp/cf/f43501d6e7284127a5197add6b40da2b is 71, key is 0b6521dbaab7fb82d69e4b2b0238467d/cf:q/1732350258353/Put/seqid=0 2024-11-23T08:24:18,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/.tmp/cf/8d1852e0ee95442cbca282e929e31e2b is 71, key is 10f80cf50ca2419124054ffc1f790496/cf:q/1732350258355/Put/seqid=0 2024-11-23T08:24:18,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741976_1152 (size=5216) 2024-11-23T08:24:18,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741976_1152 (size=5216) 2024-11-23T08:24:18,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741976_1152 (size=5216) 2024-11-23T08:24:18,599 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/.tmp/cf/f43501d6e7284127a5197add6b40da2b 2024-11-23T08:24:18,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741975_1151 (size=8392) 2024-11-23T08:24:18,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741975_1151 (size=8392) 2024-11-23T08:24:18,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741975_1151 (size=8392) 2024-11-23T08:24:18,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/.tmp/cf/f43501d6e7284127a5197add6b40da2b as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/cf/f43501d6e7284127a5197add6b40da2b 2024-11-23T08:24:18,612 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, 
pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/cf/f43501d6e7284127a5197add6b40da2b, entries=2, sequenceid=5, filesize=5.1 K 2024-11-23T08:24:18,613 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 1618166822a6ed6bd3b421503eeaa04d in 35ms, sequenceid=5, compaction requested=false 2024-11-23T08:24:18,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-11-23T08:24:18,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for 1618166822a6ed6bd3b421503eeaa04d: 2024-11-23T08:24:18,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. for snaptb-testExportWithResetTtl completed. 2024-11-23T08:24:18,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-23T08:24:18,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:24:18,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/cf/f43501d6e7284127a5197add6b40da2b] hfiles 2024-11-23T08:24:18,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/cf/f43501d6e7284127a5197add6b40da2b for snapshot=snaptb-testExportWithResetTtl 2024-11-23T08:24:18,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741977_1153 (size=100) 2024-11-23T08:24:18,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741977_1153 (size=100) 2024-11-23T08:24:18,621 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 
2024-11-23T08:24:18,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741977_1153 (size=100) 2024-11-23T08:24:18,621 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-23T08:24:18,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-11-23T08:24:18,622 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,622 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:18,624 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1618166822a6ed6bd3b421503eeaa04d in 198 msec 2024-11-23T08:24:18,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-23T08:24:19,002 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/.tmp/cf/8d1852e0ee95442cbca282e929e31e2b 2024-11-23T08:24:19,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/.tmp/cf/8d1852e0ee95442cbca282e929e31e2b as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/cf/8d1852e0ee95442cbca282e929e31e2b 2024-11-23T08:24:19,013 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/cf/8d1852e0ee95442cbca282e929e31e2b, entries=48, sequenceid=5, filesize=8.2 K 2024-11-23T08:24:19,014 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for c961ca5918d2528b30d9c5b9943fe618 in 436ms, sequenceid=5, compaction requested=false 2024-11-23T08:24:19,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for c961ca5918d2528b30d9c5b9943fe618: 2024-11-23T08:24:19,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 
for snaptb-testExportWithResetTtl completed. 2024-11-23T08:24:19,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-23T08:24:19,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:24:19,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/cf/8d1852e0ee95442cbca282e929e31e2b] hfiles 2024-11-23T08:24:19,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/cf/8d1852e0ee95442cbca282e929e31e2b for snapshot=snaptb-testExportWithResetTtl 2024-11-23T08:24:19,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741978_1154 (size=100) 2024-11-23T08:24:19,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741978_1154 (size=100) 2024-11-23T08:24:19,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741978_1154 (size=100) 2024-11-23T08:24:19,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 
2024-11-23T08:24:19,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-11-23T08:24:19,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-11-23T08:24:19,021 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:19,021 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:19,023 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=81, resume processing ppid=79 2024-11-23T08:24:19,023 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:24:19,023 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c961ca5918d2528b30d9c5b9943fe618 in 597 msec 2024-11-23T08:24:19,024 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:24:19,024 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:24:19,025 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-11-23T08:24:19,025 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-23T08:24:19,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741979_1155 (size=600) 2024-11-23T08:24:19,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741979_1155 (size=600) 2024-11-23T08:24:19,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741979_1155 (size=600) 2024-11-23T08:24:19,037 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:24:19,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-23T08:24:19,043 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:24:19,044 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-23T08:24:19,045 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:24:19,045 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-11-23T08:24:19,047 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 640 msec 2024-11-23T08:24:19,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-23T08:24:19,548 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-11-23T08:24:19,559 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350259559 2024-11-23T08:24:19,559 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34105, tgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350259559, rawTgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350259559, srcFsUri=hdfs://localhost:34105, srcDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:24:19,597 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34105, inputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:24:19,597 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350259559, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350259559/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-23T08:24:19,600 INFO [Time-limited test 
{}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T08:24:19,605 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350259559/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-23T08:24:19,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741981_1157 (size=143) 2024-11-23T08:24:19,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741980_1156 (size=600) 2024-11-23T08:24:19,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741980_1156 (size=600) 2024-11-23T08:24:19,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741980_1156 (size=600) 2024-11-23T08:24:19,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741981_1157 (size=143) 2024-11-23T08:24:19,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741981_1157 (size=143) 2024-11-23T08:24:19,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741982_1158 (size=141) 2024-11-23T08:24:19,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741982_1158 (size=141) 2024-11-23T08:24:19,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741982_1158 (size=141) 2024-11-23T08:24:19,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:19,634 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:19,635 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:20,171 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0002_000001 (auth:SIMPLE) from 127.0.0.1:36146 2024-11-23T08:24:20,186 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0002/container_1732350113698_0002_01_000001/launch_container.sh] 
2024-11-23T08:24:20,186 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0002/container_1732350113698_0002_01_000001/container_tokens] 2024-11-23T08:24:20,187 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0002/container_1732350113698_0002_01_000001/sysfs] 2024-11-23T08:24:20,642 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-17863624591916194296.jar 2024-11-23T08:24:20,642 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:20,642 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:20,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-1964357194754592092.jar 2024-11-23T08:24:20,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:20,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:20,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:20,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:20,716 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:20,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:20,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T08:24:20,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T08:24:20,717 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T08:24:20,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T08:24:20,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T08:24:20,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T08:24:20,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T08:24:20,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T08:24:20,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T08:24:20,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T08:24:20,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T08:24:20,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:24:20,720 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:24:20,720 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:24:20,720 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:24:20,720 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:24:20,721 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:24:20,721 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:24:20,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741983_1159 (size=131440) 2024-11-23T08:24:20,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741983_1159 (size=131440) 2024-11-23T08:24:20,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741983_1159 (size=131440) 2024-11-23T08:24:20,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741984_1160 (size=4188619) 2024-11-23T08:24:20,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741984_1160 (size=4188619) 2024-11-23T08:24:20,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741984_1160 (size=4188619) 2024-11-23T08:24:20,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741985_1161 (size=1323991) 
2024-11-23T08:24:20,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741985_1161 (size=1323991) 2024-11-23T08:24:20,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741985_1161 (size=1323991) 2024-11-23T08:24:20,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741986_1162 (size=903733) 2024-11-23T08:24:20,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741986_1162 (size=903733) 2024-11-23T08:24:20,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741986_1162 (size=903733) 2024-11-23T08:24:20,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741987_1163 (size=8360083) 2024-11-23T08:24:20,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741987_1163 (size=8360083) 2024-11-23T08:24:20,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741987_1163 (size=8360083) 2024-11-23T08:24:20,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741988_1164 (size=1877034) 2024-11-23T08:24:20,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741988_1164 (size=1877034) 2024-11-23T08:24:20,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741988_1164 (size=1877034) 2024-11-23T08:24:20,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741989_1165 (size=77835) 2024-11-23T08:24:20,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741989_1165 (size=77835) 2024-11-23T08:24:20,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741989_1165 (size=77835) 2024-11-23T08:24:20,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741990_1166 (size=30949) 2024-11-23T08:24:20,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741990_1166 (size=30949) 2024-11-23T08:24:20,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741990_1166 (size=30949) 2024-11-23T08:24:20,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741991_1167 (size=1597347) 2024-11-23T08:24:20,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741991_1167 (size=1597347) 2024-11-23T08:24:20,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741991_1167 
(size=1597347) 2024-11-23T08:24:20,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741992_1168 (size=4695811) 2024-11-23T08:24:20,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741992_1168 (size=4695811) 2024-11-23T08:24:20,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741992_1168 (size=4695811) 2024-11-23T08:24:20,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741993_1169 (size=232957) 2024-11-23T08:24:20,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741993_1169 (size=232957) 2024-11-23T08:24:20,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741993_1169 (size=232957) 2024-11-23T08:24:20,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741994_1170 (size=127628) 2024-11-23T08:24:20,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741994_1170 (size=127628) 2024-11-23T08:24:20,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741994_1170 (size=127628) 2024-11-23T08:24:20,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741995_1171 (size=20406) 2024-11-23T08:24:20,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741995_1171 (size=20406) 2024-11-23T08:24:20,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741995_1171 (size=20406) 2024-11-23T08:24:20,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741996_1172 (size=5175431) 2024-11-23T08:24:20,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741996_1172 (size=5175431) 2024-11-23T08:24:20,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741996_1172 (size=5175431) 2024-11-23T08:24:20,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741997_1173 (size=440957) 2024-11-23T08:24:20,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741997_1173 (size=440957) 2024-11-23T08:24:20,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741997_1173 (size=440957) 2024-11-23T08:24:20,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741998_1174 (size=217634) 2024-11-23T08:24:20,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to 
blk_1073741998_1174 (size=217634) 2024-11-23T08:24:20,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741998_1174 (size=217634) 2024-11-23T08:24:20,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741999_1175 (size=1832290) 2024-11-23T08:24:20,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741999_1175 (size=1832290) 2024-11-23T08:24:20,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741999_1175 (size=1832290) 2024-11-23T08:24:20,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742000_1176 (size=322274) 2024-11-23T08:24:20,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742000_1176 (size=322274) 2024-11-23T08:24:20,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742000_1176 (size=322274) 2024-11-23T08:24:21,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742001_1177 (size=503880) 2024-11-23T08:24:21,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742001_1177 (size=503880) 2024-11-23T08:24:21,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742001_1177 (size=503880) 2024-11-23T08:24:21,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742002_1178 (size=29229) 2024-11-23T08:24:21,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742002_1178 (size=29229) 2024-11-23T08:24:21,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742002_1178 (size=29229) 2024-11-23T08:24:21,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742003_1179 (size=24096) 2024-11-23T08:24:21,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742003_1179 (size=24096) 2024-11-23T08:24:21,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742003_1179 (size=24096) 2024-11-23T08:24:21,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742004_1180 (size=111872) 2024-11-23T08:24:21,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742004_1180 (size=111872) 2024-11-23T08:24:21,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742004_1180 (size=111872) 2024-11-23T08:24:21,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added 
to blk_1073742005_1181 (size=45609) 2024-11-23T08:24:21,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742005_1181 (size=45609) 2024-11-23T08:24:21,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742005_1181 (size=45609) 2024-11-23T08:24:21,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742006_1182 (size=6424742) 2024-11-23T08:24:21,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742006_1182 (size=6424742) 2024-11-23T08:24:21,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742006_1182 (size=6424742) 2024-11-23T08:24:21,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742007_1183 (size=136454) 2024-11-23T08:24:21,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742007_1183 (size=136454) 2024-11-23T08:24:21,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742007_1183 (size=136454) 2024-11-23T08:24:21,512 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T08:24:21,514 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-11-23T08:24:21,516 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.2 K 2024-11-23T08:24:21,516 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.1 K 2024-11-23T08:24:21,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742008_1184 (size=427) 2024-11-23T08:24:21,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742008_1184 (size=427) 2024-11-23T08:24:21,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742008_1184 (size=427) 2024-11-23T08:24:21,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742009_1185 (size=21) 2024-11-23T08:24:21,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742009_1185 (size=21) 2024-11-23T08:24:21,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742009_1185 (size=21) 2024-11-23T08:24:21,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742010_1186 (size=303996) 2024-11-23T08:24:21,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742010_1186 (size=303996) 2024-11-23T08:24:21,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to 
blk_1073742010_1186 (size=303996) 2024-11-23T08:24:21,593 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T08:24:21,594 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T08:24:21,624 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:24:22,097 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0003_000001 (auth:SIMPLE) from 127.0.0.1:51184 2024-11-23T08:24:26,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-23T08:24:26,247 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-23T08:24:26,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-23T08:24:26,248 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-23T08:24:27,699 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0003_000001 (auth:SIMPLE) from 127.0.0.1:37538 2024-11-23T08:24:27,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742011_1187 (size=349694) 2024-11-23T08:24:27,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742011_1187 (size=349694) 2024-11-23T08:24:27,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742011_1187 (size=349694) 2024-11-23T08:24:30,139 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0003_000001 (auth:SIMPLE) from 127.0.0.1:58228 2024-11-23T08:24:30,145 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0003_000001 (auth:SIMPLE) from 127.0.0.1:51200 2024-11-23T08:24:38,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742012_1188 (size=8392) 2024-11-23T08:24:38,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742012_1188 (size=8392) 2024-11-23T08:24:38,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742012_1188 (size=8392) 2024-11-23T08:24:38,297 WARN [ContainersLauncher #1 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_1/usercache/jenkins/appcache/application_1732350113698_0003/container_1732350113698_0003_01_000002/launch_container.sh] 2024-11-23T08:24:38,297 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_1/usercache/jenkins/appcache/application_1732350113698_0003/container_1732350113698_0003_01_000002/container_tokens] 2024-11-23T08:24:38,297 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_1/usercache/jenkins/appcache/application_1732350113698_0003/container_1732350113698_0003_01_000002/sysfs] 2024-11-23T08:24:39,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742014_1190 (size=5216) 2024-11-23T08:24:39,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742014_1190 (size=5216) 2024-11-23T08:24:39,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742014_1190 (size=5216) 2024-11-23T08:24:39,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742013_1189 (size=22125) 2024-11-23T08:24:39,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742013_1189 (size=22125) 2024-11-23T08:24:39,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742013_1189 (size=22125) 2024-11-23T08:24:39,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742015_1191 (size=462) 2024-11-23T08:24:39,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742015_1191 (size=462) 2024-11-23T08:24:39,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742015_1191 (size=462) 2024-11-23T08:24:39,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742016_1192 (size=22125) 2024-11-23T08:24:39,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742016_1192 (size=22125) 2024-11-23T08:24:39,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742016_1192 (size=22125) 2024-11-23T08:24:39,990 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0003/container_1732350113698_0003_01_000003/launch_container.sh] 2024-11-23T08:24:39,990 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0003/container_1732350113698_0003_01_000003/container_tokens] 2024-11-23T08:24:39,990 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0003/container_1732350113698_0003_01_000003/sysfs] 2024-11-23T08:24:39,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742017_1193 (size=349694) 2024-11-23T08:24:39,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742017_1193 (size=349694) 2024-11-23T08:24:39,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742017_1193 (size=349694) 2024-11-23T08:24:40,014 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0003_000001 (auth:SIMPLE) from 127.0.0.1:54372 2024-11-23T08:24:41,823 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T08:24:41,825 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-11-23T08:24:41,833 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-11-23T08:24:41,833 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T08:24:41,833 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T08:24:41,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-23T08:24:41,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-23T08:24:41,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-23T08:24:41,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350259559/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350259559/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-23T08:24:41,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350259559/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-23T08:24:41,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350259559/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-23T08:24:41,852 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-11-23T08:24:41,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-11-23T08:24:41,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-23T08:24:41,856 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350281856"}]},"ts":"1732350281856"} 2024-11-23T08:24:41,858 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-23T08:24:41,858 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-11-23T08:24:41,859 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, 
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-11-23T08:24:41,861 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=1618166822a6ed6bd3b421503eeaa04d, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=c961ca5918d2528b30d9c5b9943fe618, UNASSIGN}] 2024-11-23T08:24:41,862 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=1618166822a6ed6bd3b421503eeaa04d, UNASSIGN 2024-11-23T08:24:41,862 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=c961ca5918d2528b30d9c5b9943fe618, UNASSIGN 2024-11-23T08:24:41,863 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=c961ca5918d2528b30d9c5b9943fe618, regionState=CLOSING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:24:41,863 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=1618166822a6ed6bd3b421503eeaa04d, regionState=CLOSING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:24:41,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=1618166822a6ed6bd3b421503eeaa04d, UNASSIGN because future has completed 2024-11-23T08:24:41,866 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:24:41,866 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1618166822a6ed6bd3b421503eeaa04d, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:24:41,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=c961ca5918d2528b30d9c5b9943fe618, UNASSIGN because future has completed 2024-11-23T08:24:41,869 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:24:41,869 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure c961ca5918d2528b30d9c5b9943fe618, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:24:41,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-23T08:24:42,021 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:42,021 DEBUG 
[RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:24:42,022 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 1618166822a6ed6bd3b421503eeaa04d, disabling compactions & flushes 2024-11-23T08:24:42,022 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 2024-11-23T08:24:42,022 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 2024-11-23T08:24:42,022 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. after waiting 0 ms 2024-11-23T08:24:42,022 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 2024-11-23T08:24:42,022 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:42,022 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:24:42,022 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing c961ca5918d2528b30d9c5b9943fe618, disabling compactions & flushes 2024-11-23T08:24:42,022 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 2024-11-23T08:24:42,022 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 2024-11-23T08:24:42,022 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. after waiting 0 ms 2024-11-23T08:24:42,022 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 
2024-11-23T08:24:42,031 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T08:24:42,031 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T08:24:42,032 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:24:42,032 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d. 2024-11-23T08:24:42,033 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:24:42,033 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 1618166822a6ed6bd3b421503eeaa04d: Waiting for close lock at 1732350282022Running coprocessor pre-close hooks at 1732350282022Disabling compacts and flushes for region at 1732350282022Disabling writes for close at 1732350282022Writing region close event to WAL at 1732350282023 (+1 ms)Running coprocessor post-close hooks at 1732350282032 (+9 ms)Closed at 1732350282032 2024-11-23T08:24:42,033 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618. 
2024-11-23T08:24:42,033 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for c961ca5918d2528b30d9c5b9943fe618: Waiting for close lock at 1732350282022Running coprocessor pre-close hooks at 1732350282022Disabling compacts and flushes for region at 1732350282022Disabling writes for close at 1732350282022Writing region close event to WAL at 1732350282023 (+1 ms)Running coprocessor post-close hooks at 1732350282033 (+10 ms)Closed at 1732350282033 2024-11-23T08:24:42,035 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:42,036 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=1618166822a6ed6bd3b421503eeaa04d, regionState=CLOSED 2024-11-23T08:24:42,036 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:42,038 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=c961ca5918d2528b30d9c5b9943fe618, regionState=CLOSED 2024-11-23T08:24:42,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1618166822a6ed6bd3b421503eeaa04d, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:24:42,040 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure c961ca5918d2528b30d9c5b9943fe618, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:24:42,041 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-11-23T08:24:42,041 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure 1618166822a6ed6bd3b421503eeaa04d, server=b33d9013c60a,45199,1732350106787 in 173 msec 2024-11-23T08:24:42,044 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=85 2024-11-23T08:24:42,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=1618166822a6ed6bd3b421503eeaa04d, UNASSIGN in 180 msec 2024-11-23T08:24:42,044 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure c961ca5918d2528b30d9c5b9943fe618, server=b33d9013c60a,39649,1732350107049 in 172 msec 2024-11-23T08:24:42,048 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=83 2024-11-23T08:24:42,048 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=c961ca5918d2528b30d9c5b9943fe618, UNASSIGN in 183 msec 2024-11-23T08:24:42,051 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-11-23T08:24:42,051 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, 
state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 190 msec 2024-11-23T08:24:42,054 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350282054"}]},"ts":"1732350282054"} 2024-11-23T08:24:42,056 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-23T08:24:42,056 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-11-23T08:24:42,058 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 205 msec 2024-11-23T08:24:42,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-23T08:24:42,168 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-11-23T08:24:42,169 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-11-23T08:24:42,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T08:24:42,172 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T08:24:42,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-11-23T08:24:42,174 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T08:24:42,177 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-11-23T08:24:42,180 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:42,180 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:42,184 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/recovered.edits] 2024-11-23T08:24:42,184 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/recovered.edits] 2024-11-23T08:24:42,193 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/cf/f43501d6e7284127a5197add6b40da2b to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/cf/f43501d6e7284127a5197add6b40da2b 2024-11-23T08:24:42,193 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/cf/8d1852e0ee95442cbca282e929e31e2b to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/cf/8d1852e0ee95442cbca282e929e31e2b 2024-11-23T08:24:42,196 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/recovered.edits/8.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618/recovered.edits/8.seqid 2024-11-23T08:24:42,197 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/recovered.edits/8.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d/recovered.edits/8.seqid 2024-11-23T08:24:42,197 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/c961ca5918d2528b30d9c5b9943fe618 2024-11-23T08:24:42,197 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportWithResetTtl/1618166822a6ed6bd3b421503eeaa04d 2024-11-23T08:24:42,197 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-11-23T08:24:42,199 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T08:24:42,202 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-11-23T08:24:42,205 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 
2024-11-23T08:24:42,207 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T08:24:42,207 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 2024-11-23T08:24:42,207 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350282207"}]},"ts":"9223372036854775807"} 2024-11-23T08:24:42,207 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350282207"}]},"ts":"9223372036854775807"} 2024-11-23T08:24:42,211 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T08:24:42,211 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 1618166822a6ed6bd3b421503eeaa04d, NAME => 'testExportWithResetTtl,,1732350257708.1618166822a6ed6bd3b421503eeaa04d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c961ca5918d2528b30d9c5b9943fe618, NAME => 'testExportWithResetTtl,1,1732350257708.c961ca5918d2528b30d9c5b9943fe618.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T08:24:42,211 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-11-23T08:24:42,211 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732350282211"}]},"ts":"9223372036854775807"} 2024-11-23T08:24:42,213 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-11-23T08:24:42,214 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T08:24:42,216 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 45 msec 2024-11-23T08:24:42,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T08:24:42,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T08:24:42,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T08:24:42,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T08:24:42,235 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-23T08:24:42,235 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-23T08:24:42,235 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-23T08:24:42,235 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-23T08:24:42,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T08:24:42,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T08:24:42,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:42,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:42,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T08:24:42,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:42,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T08:24:42,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:42,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-11-23T08:24:42,243 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:42,244 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-11-23T08:24:42,244 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data 
PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:42,244 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-11-23T08:24:42,244 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:42,244 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:42,245 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-11-23T08:24:42,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T08:24:42,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-23T08:24:42,251 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350282250"}]},"ts":"1732350282250"} 2024-11-23T08:24:42,253 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-23T08:24:42,253 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-11-23T08:24:42,254 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-11-23T08:24:42,257 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3c265c0a26cf89d10d928836bf98f43f, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96b41dc10d05701322f44a234b88a415, UNASSIGN}] 2024-11-23T08:24:42,258 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96b41dc10d05701322f44a234b88a415, UNASSIGN 2024-11-23T08:24:42,258 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3c265c0a26cf89d10d928836bf98f43f, UNASSIGN 2024-11-23T08:24:42,259 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=3c265c0a26cf89d10d928836bf98f43f, regionState=CLOSING, 
regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:24:42,259 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=96b41dc10d05701322f44a234b88a415, regionState=CLOSING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:24:42,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96b41dc10d05701322f44a234b88a415, UNASSIGN because future has completed 2024-11-23T08:24:42,263 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:24:42,263 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 96b41dc10d05701322f44a234b88a415, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:24:42,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3c265c0a26cf89d10d928836bf98f43f, UNASSIGN because future has completed 2024-11-23T08:24:42,264 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:24:42,264 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3c265c0a26cf89d10d928836bf98f43f, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:24:42,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-23T08:24:42,416 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:42,416 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:24:42,416 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing 96b41dc10d05701322f44a234b88a415, disabling compactions & flushes 2024-11-23T08:24:42,417 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 2024-11-23T08:24:42,417 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 2024-11-23T08:24:42,417 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 
after waiting 0 ms 2024-11-23T08:24:42,417 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 2024-11-23T08:24:42,418 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:42,418 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:24:42,418 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing 3c265c0a26cf89d10d928836bf98f43f, disabling compactions & flushes 2024-11-23T08:24:42,419 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 2024-11-23T08:24:42,419 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 2024-11-23T08:24:42,419 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. after waiting 0 ms 2024-11-23T08:24:42,419 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 2024-11-23T08:24:42,421 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:24:42,422 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:24:42,422 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:24:42,422 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415. 
2024-11-23T08:24:42,422 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for 96b41dc10d05701322f44a234b88a415: Waiting for close lock at 1732350282416Running coprocessor pre-close hooks at 1732350282416Disabling compacts and flushes for region at 1732350282416Disabling writes for close at 1732350282417 (+1 ms)Writing region close event to WAL at 1732350282417Running coprocessor post-close hooks at 1732350282422 (+5 ms)Closed at 1732350282422 2024-11-23T08:24:42,422 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:24:42,422 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f. 2024-11-23T08:24:42,422 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for 3c265c0a26cf89d10d928836bf98f43f: Waiting for close lock at 1732350282418Running coprocessor pre-close hooks at 1732350282418Disabling compacts and flushes for region at 1732350282418Disabling writes for close at 1732350282419 (+1 ms)Writing region close event to WAL at 1732350282419Running coprocessor post-close hooks at 1732350282422 (+3 ms)Closed at 1732350282422 2024-11-23T08:24:42,424 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed 3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:42,425 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=3c265c0a26cf89d10d928836bf98f43f, regionState=CLOSED 2024-11-23T08:24:42,425 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed 96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:42,425 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=96b41dc10d05701322f44a234b88a415, regionState=CLOSED 2024-11-23T08:24:42,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3c265c0a26cf89d10d928836bf98f43f, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:24:42,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 96b41dc10d05701322f44a234b88a415, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:24:42,428 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=91 2024-11-23T08:24:42,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure 3c265c0a26cf89d10d928836bf98f43f, server=b33d9013c60a,39649,1732350107049 in 163 msec 2024-11-23T08:24:42,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=92 2024-11-23T08:24:42,430 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3c265c0a26cf89d10d928836bf98f43f, UNASSIGN in 172 msec 2024-11-23T08:24:42,430 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure 96b41dc10d05701322f44a234b88a415, server=b33d9013c60a,43417,1732350106962 in 165 msec 2024-11-23T08:24:42,431 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=90 2024-11-23T08:24:42,431 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=96b41dc10d05701322f44a234b88a415, UNASSIGN in 174 msec 2024-11-23T08:24:42,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-11-23T08:24:42,433 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 177 msec 2024-11-23T08:24:42,433 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350282433"}]},"ts":"1732350282433"} 2024-11-23T08:24:42,435 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-23T08:24:42,435 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-11-23T08:24:42,437 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 191 msec 2024-11-23T08:24:42,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-23T08:24:42,568 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T08:24:42,568 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-11-23T08:24:42,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T08:24:42,570 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T08:24:42,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-11-23T08:24:42,571 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T08:24:42,573 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-11-23T08:24:42,575 
DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:42,575 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:42,577 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/recovered.edits] 2024-11-23T08:24:42,577 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/recovered.edits] 2024-11-23T08:24:42,581 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/cf/ef786532291b432781ba814793d52467 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/cf/ef786532291b432781ba814793d52467 2024-11-23T08:24:42,581 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/cf/8c9dd8eda7a9411d900e3e045625b828 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/cf/8c9dd8eda7a9411d900e3e045625b828 2024-11-23T08:24:42,584 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415/recovered.edits/9.seqid 2024-11-23T08:24:42,584 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f/recovered.edits/9.seqid 2024-11-23T08:24:42,584 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/3c265c0a26cf89d10d928836bf98f43f 2024-11-23T08:24:42,584 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithResetTtl/96b41dc10d05701322f44a234b88a415 2024-11-23T08:24:42,584 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-11-23T08:24:42,587 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T08:24:42,589 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-11-23T08:24:42,591 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-11-23T08:24:42,593 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T08:24:42,593 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 2024-11-23T08:24:42,593 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350282593"}]},"ts":"9223372036854775807"} 2024-11-23T08:24:42,593 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350282593"}]},"ts":"9223372036854775807"} 2024-11-23T08:24:42,595 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T08:24:42,596 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 3c265c0a26cf89d10d928836bf98f43f, NAME => 'testtb-testExportWithResetTtl,,1732350256337.3c265c0a26cf89d10d928836bf98f43f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 96b41dc10d05701322f44a234b88a415, NAME => 'testtb-testExportWithResetTtl,1,1732350256337.96b41dc10d05701322f44a234b88a415.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T08:24:42,596 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 
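
Note: the master-side procedures traced above (DisableTableProcedure pid=89, DeleteTableProcedure pid=95, and the "delete name: ... type: DISABLED" snapshot requests a little further down) are what the HBase master logs when a client issues ordinary Admin calls during test teardown. The following is only a minimal client-side sketch, assuming an HBase 2.x Java client on the classpath; the class name DropExportTestTableSketch is invented for illustration and is not the test's actual code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropExportTestTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
          if (admin.tableExists(tn)) {
            if (admin.isTableEnabled(tn)) {
              admin.disableTable(tn);  // blocks until the DisableTableProcedure finishes
            }
            admin.deleteTable(tn);     // blocks until the DeleteTableProcedure finishes
          }
          // Snapshot cleanup, mirroring the "delete name: ..." requests in the log.
          admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
        }
      }
    }

The repeated "Checking to see if procedure is done pid=..." lines correspond to the client polling the master while these blocking calls wait for the procedures to complete.
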
2024-11-23T08:24:42,596 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732350282596"}]},"ts":"9223372036854775807"} 2024-11-23T08:24:42,598 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-11-23T08:24:42,599 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T08:24:42,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 31 msec 2024-11-23T08:24:42,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T08:24:42,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T08:24:42,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T08:24:42,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T08:24:42,634 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-23T08:24:42,634 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-23T08:24:42,634 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-23T08:24:42,634 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-23T08:24:42,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T08:24:42,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T08:24:42,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T08:24:42,642 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T08:24:42,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:42,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:42,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:42,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:42,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-11-23T08:24:42,643 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-11-23T08:24:42,643 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T08:24:42,652 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-11-23T08:24:42,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-11-23T08:24:42,656 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-11-23T08:24:42,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-11-23T08:24:42,660 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-11-23T08:24:42,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-11-23T08:24:42,685 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=799 (was 793) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1624668369_1 at /127.0.0.1:42320 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:53938 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 117848) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1624668369_1 at /127.0.0.1:53908 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2963 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:39609 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:42342 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:34999 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34999 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:49848 [Waiting for operation #4] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=815 (was 809) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=768 (was 638) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 19), AvailableMemoryMB=3297 (was 2201) - AvailableMemoryMB LEAK? 
- 2024-11-23T08:24:42,685 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-11-23T08:24:42,703 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=799, OpenFileDescriptor=815, MaxFileDescriptor=1048576, SystemLoadAverage=768, ProcessCount=17, AvailableMemoryMB=3297 2024-11-23T08:24:42,703 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-11-23T08:24:42,704 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:24:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-11-23T08:24:42,706 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:24:42,707 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:24:42,707 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-11-23T08:24:42,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-23T08:24:42,708 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:24:42,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742018_1194 (size=407) 2024-11-23T08:24:42,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742018_1194 (size=407) 2024-11-23T08:24:42,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742018_1194 (size=407) 2024-11-23T08:24:42,724 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ef38f1c77fb1f2ee154421c6efb858f6, NAME => 'testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:24:42,725 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ee2fd65d63bd4e9f2bc75773c639a445, NAME => 'testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:24:42,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742020_1196 (size=68) 2024-11-23T08:24:42,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742020_1196 (size=68) 2024-11-23T08:24:42,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742020_1196 (size=68) 2024-11-23T08:24:42,734 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:24:42,734 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing ee2fd65d63bd4e9f2bc75773c639a445, disabling compactions & flushes 2024-11-23T08:24:42,734 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 2024-11-23T08:24:42,734 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 2024-11-23T08:24:42,735 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. after waiting 1 ms 2024-11-23T08:24:42,735 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 2024-11-23T08:24:42,735 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 
2024-11-23T08:24:42,735 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for ee2fd65d63bd4e9f2bc75773c639a445: Waiting for close lock at 1732350282734Disabling compacts and flushes for region at 1732350282734Disabling writes for close at 1732350282735 (+1 ms)Writing region close event to WAL at 1732350282735Closed at 1732350282735 2024-11-23T08:24:42,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742019_1195 (size=68) 2024-11-23T08:24:42,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742019_1195 (size=68) 2024-11-23T08:24:42,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742019_1195 (size=68) 2024-11-23T08:24:42,741 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:24:42,741 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing ef38f1c77fb1f2ee154421c6efb858f6, disabling compactions & flushes 2024-11-23T08:24:42,741 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 2024-11-23T08:24:42,741 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 2024-11-23T08:24:42,742 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. after waiting 0 ms 2024-11-23T08:24:42,742 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 2024-11-23T08:24:42,742 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 
2024-11-23T08:24:42,742 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for ef38f1c77fb1f2ee154421c6efb858f6: Waiting for close lock at 1732350282741Disabling compacts and flushes for region at 1732350282741Disabling writes for close at 1732350282742 (+1 ms)Writing region close event to WAL at 1732350282742Closed at 1732350282742 2024-11-23T08:24:42,744 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:24:42,744 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732350282744"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350282744"}]},"ts":"1732350282744"} 2024-11-23T08:24:42,744 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732350282744"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350282744"}]},"ts":"1732350282744"} 2024-11-23T08:24:42,747 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T08:24:42,748 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:24:42,748 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350282748"}]},"ts":"1732350282748"} 2024-11-23T08:24:42,750 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-11-23T08:24:42,751 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:24:42,752 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:24:42,752 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:24:42,752 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:24:42,753 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:24:42,753 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:24:42,753 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:24:42,753 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:24:42,753 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:24:42,753 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:24:42,753 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:24:42,753 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ef38f1c77fb1f2ee154421c6efb858f6, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ee2fd65d63bd4e9f2bc75773c639a445, ASSIGN}] 2024-11-23T08:24:42,754 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ee2fd65d63bd4e9f2bc75773c639a445, ASSIGN 2024-11-23T08:24:42,755 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ef38f1c77fb1f2ee154421c6efb858f6, ASSIGN 2024-11-23T08:24:42,756 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ee2fd65d63bd4e9f2bc75773c639a445, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 2024-11-23T08:24:42,756 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ef38f1c77fb1f2ee154421c6efb858f6, ASSIGN; state=OFFLINE, location=b33d9013c60a,43417,1732350106962; forceNewPlan=false, retain=false 2024-11-23T08:24:42,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-23T08:24:42,906 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T08:24:42,907 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=ef38f1c77fb1f2ee154421c6efb858f6, regionState=OPENING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:24:42,907 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=ee2fd65d63bd4e9f2bc75773c639a445, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:24:42,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ef38f1c77fb1f2ee154421c6efb858f6, ASSIGN because future has completed 2024-11-23T08:24:42,909 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:24:42,910 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ee2fd65d63bd4e9f2bc75773c639a445, ASSIGN because future has completed 2024-11-23T08:24:42,911 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:24:43,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-23T08:24:43,064 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 2024-11-23T08:24:43,064 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => ef38f1c77fb1f2ee154421c6efb858f6, NAME => 'testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T08:24:43,065 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. service=AccessControlService 2024-11-23T08:24:43,065 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:24:43,065 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,065 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:24:43,065 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,065 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,066 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 2024-11-23T08:24:43,066 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => ee2fd65d63bd4e9f2bc75773c639a445, NAME => 'testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T08:24:43,067 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. service=AccessControlService 2024-11-23T08:24:43,067 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:24:43,067 INFO [StoreOpener-ef38f1c77fb1f2ee154421c6efb858f6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,067 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,067 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:24:43,067 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,067 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,068 INFO [StoreOpener-ee2fd65d63bd4e9f2bc75773c639a445-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,068 INFO [StoreOpener-ef38f1c77fb1f2ee154421c6efb858f6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef38f1c77fb1f2ee154421c6efb858f6 columnFamilyName cf 2024-11-23T08:24:43,069 DEBUG [StoreOpener-ef38f1c77fb1f2ee154421c6efb858f6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:24:43,069 INFO [StoreOpener-ef38f1c77fb1f2ee154421c6efb858f6-1 {}] regionserver.HStore(327): Store=ef38f1c77fb1f2ee154421c6efb858f6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:24:43,069 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,070 INFO [StoreOpener-ee2fd65d63bd4e9f2bc75773c639a445-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 
1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ee2fd65d63bd4e9f2bc75773c639a445 columnFamilyName cf 2024-11-23T08:24:43,070 DEBUG [StoreOpener-ee2fd65d63bd4e9f2bc75773c639a445-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:24:43,070 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,070 INFO [StoreOpener-ee2fd65d63bd4e9f2bc75773c639a445-1 {}] regionserver.HStore(327): Store=ee2fd65d63bd4e9f2bc75773c639a445/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:24:43,070 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,070 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,071 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,071 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,071 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,071 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,071 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,071 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,073 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, 
pid=100}] regionserver.HRegion(1093): writing seq id for ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,073 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,075 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:24:43,075 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:24:43,075 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened ee2fd65d63bd4e9f2bc75773c639a445; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58894455, jitterRate=-0.12240423262119293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:24:43,075 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened ef38f1c77fb1f2ee154421c6efb858f6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70331940, jitterRate=0.04802757501602173}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:24:43,075 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,075 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,076 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for ee2fd65d63bd4e9f2bc75773c639a445: Running coprocessor pre-open hook at 1732350283067Writing region info on filesystem at 1732350283067Initializing all the Stores at 1732350283068 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350283068Cleaning up temporary data from old regions at 1732350283071 (+3 ms)Running coprocessor post-open hooks at 1732350283075 (+4 ms)Region opened successfully at 1732350283076 (+1 ms) 2024-11-23T08:24:43,076 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for ef38f1c77fb1f2ee154421c6efb858f6: Running coprocessor pre-open hook at 1732350283065Writing region info on filesystem at 1732350283066 (+1 ms)Initializing all the Stores at 
1732350283066Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350283066Cleaning up temporary data from old regions at 1732350283071 (+5 ms)Running coprocessor post-open hooks at 1732350283075 (+4 ms)Region opened successfully at 1732350283076 (+1 ms) 2024-11-23T08:24:43,077 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445., pid=100, masterSystemTime=1732350283063 2024-11-23T08:24:43,077 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6., pid=99, masterSystemTime=1732350283061 2024-11-23T08:24:43,079 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 2024-11-23T08:24:43,079 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 2024-11-23T08:24:43,080 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=ee2fd65d63bd4e9f2bc75773c639a445, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:24:43,080 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 2024-11-23T08:24:43,080 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 2024-11-23T08:24:43,080 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=ef38f1c77fb1f2ee154421c6efb858f6, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:24:43,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:24:43,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:24:43,084 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=b33d9013c60a,43417,1732350106962, table=testtb-testExportFileSystemState, region=ef38f1c77fb1f2ee154421c6efb858f6. 
It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-23T08:24:43,085 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=98 2024-11-23T08:24:43,086 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445, server=b33d9013c60a,39649,1732350107049 in 172 msec 2024-11-23T08:24:43,086 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=97 2024-11-23T08:24:43,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6, server=b33d9013c60a,43417,1732350106962 in 175 msec 2024-11-23T08:24:43,087 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ee2fd65d63bd4e9f2bc75773c639a445, ASSIGN in 332 msec 2024-11-23T08:24:43,089 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-11-23T08:24:43,089 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ef38f1c77fb1f2ee154421c6efb858f6, ASSIGN in 333 msec 2024-11-23T08:24:43,090 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:24:43,090 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350283090"}]},"ts":"1732350283090"} 2024-11-23T08:24:43,092 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-11-23T08:24:43,093 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:24:43,093 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-11-23T08:24:43,097 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T08:24:43,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:43,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:43,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:43,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:24:43,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:43,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:43,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:43,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T08:24:43,136 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 430 msec 2024-11-23T08:24:43,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-23T08:24:43,338 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T08:24:43,338 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-11-23T08:24:43,338 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:24:43,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-11-23T08:24:43,342 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:24:43,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemState assigned. 2024-11-23T08:24:43,342 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T08:24:43,345 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T08:24:43,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350283345 (current time:1732350283345). 
2024-11-23T08:24:43,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:24:43,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-23T08:24:43,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:24:43,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cce28f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:43,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:24:43,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:24:43,347 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:24:43,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:24:43,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:24:43,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@564d826, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:43,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:24:43,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:24:43,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:43,349 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36110, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:24:43,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@543f5a42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:43,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:24:43,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1]
2024-11-23T08:24:43,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-23T08:24:43,351 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54260, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-23T08:24:43,352 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501.
2024-11-23T08:24:43,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-23T08:24:43,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T08:24:43,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T08:24:43,353 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-23T08:24:43,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54f9ad7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:43,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:24:43,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:24:43,354 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:24:43,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:24:43,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:24:43,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4998f982, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:43,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:24:43,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:24:43,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:43,356 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36120, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:24:43,357 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9eca675, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:43,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:24:43,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:24:43,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:24:43,360 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54272, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T08:24:43,362 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2]
2024-11-23T08:24:43,362 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-23T08:24:43,363 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35616, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-23T08:24:43,364 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501.
2024-11-23T08:24:43,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-23T08:24:43,364 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:43,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:43,365 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:24:43,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T08:24:43,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:24:43,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T08:24:43,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-11-23T08:24:43,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-23T08:24:43,368 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:24:43,369 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:24:43,373 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:24:43,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742021_1197 (size=170) 2024-11-23T08:24:43,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742021_1197 (size=170) 2024-11-23T08:24:43,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742021_1197 (size=170) 2024-11-23T08:24:43,384 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
2024-11-23T08:24:43,384 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445}] 2024-11-23T08:24:43,385 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,385 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-23T08:24:43,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-11-23T08:24:43,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-11-23T08:24:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 2024-11-23T08:24:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 2024-11-23T08:24:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for ee2fd65d63bd4e9f2bc75773c639a445: 2024-11-23T08:24:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for ef38f1c77fb1f2ee154421c6efb858f6: 2024-11-23T08:24:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. for emptySnaptb0-testExportFileSystemState completed. 2024-11-23T08:24:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. for emptySnaptb0-testExportFileSystemState completed. 2024-11-23T08:24:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-23T08:24:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-23T08:24:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:24:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:24:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:24:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:24:43,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742022_1198 (size=71) 2024-11-23T08:24:43,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742022_1198 (size=71) 2024-11-23T08:24:43,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742022_1198 (size=71) 2024-11-23T08:24:43,549 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 
2024-11-23T08:24:43,549 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-23T08:24:43,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-11-23T08:24:43,550 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,550 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,553 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445 in 167 msec 2024-11-23T08:24:43,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742023_1199 (size=71) 2024-11-23T08:24:43,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742023_1199 (size=71) 2024-11-23T08:24:43,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742023_1199 (size=71) 2024-11-23T08:24:43,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 
2024-11-23T08:24:43,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-11-23T08:24:43,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-11-23T08:24:43,566 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,566 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,568 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=102, resume processing ppid=101 2024-11-23T08:24:43,568 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6 in 182 msec 2024-11-23T08:24:43,568 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:24:43,569 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:24:43,570 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:24:43,570 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-11-23T08:24:43,571 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-11-23T08:24:43,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742024_1200 (size=552) 2024-11-23T08:24:43,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742024_1200 (size=552) 2024-11-23T08:24:43,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742024_1200 (size=552) 2024-11-23T08:24:43,588 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:24:43,595 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:24:43,595 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-11-23T08:24:43,597 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:24:43,598 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-11-23T08:24:43,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 232 msec 2024-11-23T08:24:43,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-23T08:24:43,688 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T08:24:43,699 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='01b8794cfb5160ea230f8cba32769883c', locateType=CURRENT is [region=testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:24:43,703 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='2016feaf32e725d3691a2821be5aad8c', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:43,706 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43417 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. with WAL disabled. Data may be lost in the event of a crash. 
2024-11-23T08:24:43,708 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='1473af0e7212ab6c85fbb4e2bb26bfc0f', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:43,709 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39649 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:24:43,709 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='26cdb580cad62a67bc590b46d94d7dc2e', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:43,710 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='33168ed48cc2b11dd6e3354626fd62095', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:43,711 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='49290eee1d181c47ad58c9a2dc8b9d9e7', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:43,712 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='5f62ca224e4ef2c9885e25a02d591eb9a', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:43,713 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='6afa433567fa02fa28bc039a7dc8bed13', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:43,714 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='7a362a78494b4a29d00cb736cfd74b35', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:43,715 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T08:24:43,718 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-23T08:24:43,718 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 
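[editorial note] The location lookups and "writing data to region ... with WAL disabled" entries above come from the test client loading rows into the two regions. A minimal, illustrative sketch of such a write through the public client API follows; it is not the test's own code. The table name, family "cf", qualifier "q", and the example row key are taken from this log, while the value and the connection setup are placeholders. Setting Durability.SKIP_WAL is what produces the "WAL disabled" warning on the region server.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
      // Row key copied from the flush log further below; the value is a placeholder.
      Put put = new Put(Bytes.toBytes("0111df87ced13442b45066bfbc08e5e1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL durability is what triggers the "Data may be lost in the event of a crash" warning.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}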
2024-11-23T08:24:43,719 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:24:43,721 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T08:24:43,729 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T08:24:43,736 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T08:24:43,740 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T08:24:43,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350283740 (current time:1732350283740). 2024-11-23T08:24:43,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:24:43,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-23T08:24:43,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:24:43,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28342890, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:43,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:24:43,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:24:43,742 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:24:43,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:24:43,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:24:43,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dfdf9b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-23T08:24:43,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:24:43,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:24:43,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:43,744 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36134, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:24:43,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e2f5b1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:43,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:24:43,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:24:43,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:24:43,748 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54284, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:24:43,749 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 
2024-11-23T08:24:43,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:24:43,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:43,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:43,750 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:24:43,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@138437fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:43,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:24:43,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:24:43,752 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:24:43,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:24:43,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:24:43,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a68685, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:43,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:24:43,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:24:43,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:43,754 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:24:43,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@788762f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:24:43,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:24:43,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:24:43,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:24:43,759 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54288, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:24:43,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:24:43,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:24:43,763 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35632, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:24:43,765 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 
2024-11-23T08:24:43,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:24:43,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:43,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:24:43,766 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:24:43,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T08:24:43,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
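[editorial note] The request logged above, { ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, is the shape of a flush-type snapshot submitted through the public Admin API; the master then stores the SnapshotProcedure (pid=104, registered just below). The sketch here is illustrative only and assumes a plain client Configuration rather than the test harness setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class FlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure reports SUCCESS.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"),
          SnapshotType.FLUSH));
    }
  }
}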
2024-11-23T08:24:43,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T08:24:43,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-11-23T08:24:43,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-23T08:24:43,771 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:24:43,773 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:24:43,776 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:24:43,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742025_1201 (size=165) 2024-11-23T08:24:43,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742025_1201 (size=165) 2024-11-23T08:24:43,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742025_1201 (size=165) 2024-11-23T08:24:43,792 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:24:43,792 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445}] 2024-11-23T08:24:43,793 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:43,793 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:43,877 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-23T08:24:43,921 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-11-23T08:24:43,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-11-23T08:24:43,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-11-23T08:24:43,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 2024-11-23T08:24:43,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 2024-11-23T08:24:43,946 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing ef38f1c77fb1f2ee154421c6efb858f6 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-23T08:24:43,946 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing ee2fd65d63bd4e9f2bc75773c639a445 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-23T08:24:43,962 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/.tmp/cf/89d27fe9c7774e85a7b1f5bb7cd15f98 is 71, key is 1325fcdc18a299d6e535224cf8daaec4/cf:q/1732350283709/Put/seqid=0 2024-11-23T08:24:43,966 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/.tmp/cf/ee4d1f71c47f468e9121b44fa33a34df is 71, key is 0111df87ced13442b45066bfbc08e5e1/cf:q/1732350283706/Put/seqid=0 2024-11-23T08:24:43,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742026_1202 (size=8324) 2024-11-23T08:24:43,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742026_1202 (size=8324) 2024-11-23T08:24:43,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742026_1202 (size=8324) 2024-11-23T08:24:43,968 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/.tmp/cf/89d27fe9c7774e85a7b1f5bb7cd15f98 2024-11-23T08:24:43,975 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/.tmp/cf/89d27fe9c7774e85a7b1f5bb7cd15f98 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/cf/89d27fe9c7774e85a7b1f5bb7cd15f98 2024-11-23T08:24:43,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742027_1203 (size=5286) 2024-11-23T08:24:43,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742027_1203 (size=5286) 2024-11-23T08:24:43,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742027_1203 (size=5286) 2024-11-23T08:24:43,982 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/cf/89d27fe9c7774e85a7b1f5bb7cd15f98, entries=47, sequenceid=6, filesize=8.1 K 2024-11-23T08:24:43,983 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/.tmp/cf/ee4d1f71c47f468e9121b44fa33a34df 2024-11-23T08:24:43,983 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for ee2fd65d63bd4e9f2bc75773c639a445 in 38ms, sequenceid=6, compaction requested=false 2024-11-23T08:24:43,983 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for ee2fd65d63bd4e9f2bc75773c639a445: 2024-11-23T08:24:43,983 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. for snaptb0-testExportFileSystemState completed. 2024-11-23T08:24:43,984 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-23T08:24:43,984 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:24:43,984 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/cf/89d27fe9c7774e85a7b1f5bb7cd15f98] hfiles 2024-11-23T08:24:43,984 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/cf/89d27fe9c7774e85a7b1f5bb7cd15f98 for snapshot=snaptb0-testExportFileSystemState 2024-11-23T08:24:43,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/.tmp/cf/ee4d1f71c47f468e9121b44fa33a34df as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/cf/ee4d1f71c47f468e9121b44fa33a34df 2024-11-23T08:24:43,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742028_1204 (size=110) 2024-11-23T08:24:43,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742028_1204 (size=110) 2024-11-23T08:24:43,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742028_1204 (size=110) 2024-11-23T08:24:43,999 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 
2024-11-23T08:24:43,999 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-11-23T08:24:44,000 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/cf/ee4d1f71c47f468e9121b44fa33a34df, entries=3, sequenceid=6, filesize=5.2 K 2024-11-23T08:24:44,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-11-23T08:24:44,000 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:44,000 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:24:44,001 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for ef38f1c77fb1f2ee154421c6efb858f6 in 56ms, sequenceid=6, compaction requested=false 2024-11-23T08:24:44,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for ef38f1c77fb1f2ee154421c6efb858f6: 2024-11-23T08:24:44,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. for snaptb0-testExportFileSystemState completed. 2024-11-23T08:24:44,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-23T08:24:44,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:24:44,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/cf/ee4d1f71c47f468e9121b44fa33a34df] hfiles 2024-11-23T08:24:44,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/cf/ee4d1f71c47f468e9121b44fa33a34df for snapshot=snaptb0-testExportFileSystemState 2024-11-23T08:24:44,002 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445 in 209 msec 2024-11-23T08:24:44,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742029_1205 (size=110) 2024-11-23T08:24:44,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742029_1205 (size=110) 2024-11-23T08:24:44,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742029_1205 (size=110) 2024-11-23T08:24:44,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 
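[editorial note] At this point both SnapshotRegionProcedures (pid=105 and pid=106) have flushed and referenced their hfiles; once the parent procedure pid=104 reaches SUCCESS a few entries below, the snapshot is visible to clients. A small illustrative check via the public Admin API is sketched here; the helper class and method name are invented for the example.

import java.io.IOException;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public final class SnapshotVisibilityCheck {
  // Returns true once a completed snapshot with the given name is listed by the master.
  static boolean isVisible(Admin admin, String name) throws IOException {
    for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile(Pattern.quote(name)))) {
      if (name.equals(sd.getName())) {
        return true;
      }
    }
    return false;
  }
}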
2024-11-23T08:24:44,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-23T08:24:44,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-11-23T08:24:44,009 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:44,009 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:24:44,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=105, resume processing ppid=104 2024-11-23T08:24:44,011 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:24:44,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6 in 218 msec 2024-11-23T08:24:44,012 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:24:44,013 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:24:44,013 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-11-23T08:24:44,014 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-23T08:24:44,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742030_1206 (size=630) 2024-11-23T08:24:44,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742030_1206 (size=630) 2024-11-23T08:24:44,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742030_1206 (size=630) 2024-11-23T08:24:44,049 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:24:44,056 INFO 
[PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:24:44,057 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-23T08:24:44,058 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:24:44,059 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-11-23T08:24:44,061 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 292 msec 2024-11-23T08:24:44,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-23T08:24:44,087 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T08:24:44,088 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350284087 2024-11-23T08:24:44,088 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34105, tgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350284087, rawTgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350284087, srcFsUri=hdfs://localhost:34105, srcDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:24:44,123 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34105, inputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:24:44,123 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350284087, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350284087/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-23T08:24:44,125 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): 
Verify the source snapshot's expiration status and integrity. 2024-11-23T08:24:44,133 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350284087/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-23T08:24:44,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742031_1207 (size=165) 2024-11-23T08:24:44,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742031_1207 (size=165) 2024-11-23T08:24:44,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742031_1207 (size=165) 2024-11-23T08:24:44,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742032_1208 (size=630) 2024-11-23T08:24:44,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742032_1208 (size=630) 2024-11-23T08:24:44,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742032_1208 (size=630) 2024-11-23T08:24:44,158 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:44,159 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:44,159 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:44,707 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
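[editorial note] The export that begins above (manifest copied from .hbase-snapshot/snaptb0-testExportFileSystemState to export-test/export-1732350284087, skipTmp=false) is driven by the test itself; the jar-resolution entries that follow are TableMapReduceUtil staging dependencies for the MapReduce copy job. As a rough equivalent, such an export is normally started with "hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>", or programmatically as sketched below. The argument values are copied from this log; the surrounding main class is illustrative, not the test's code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ToolRunner drives ExportSnapshot the same way its own main() does.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemState",
        "-copy-to", "hdfs://localhost:34105/user/jenkins/test-data/"
            + "68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350284087"
    });
    System.exit(rc);
  }
}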
2024-11-23T08:24:45,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-12611278030920595938.jar 2024-11-23T08:24:45,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:45,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:45,336 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-4805260519887630140.jar 2024-11-23T08:24:45,336 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:45,336 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:45,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:45,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:45,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:45,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:24:45,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T08:24:45,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T08:24:45,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T08:24:45,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T08:24:45,339 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T08:24:45,347 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T08:24:45,347 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T08:24:45,347 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T08:24:45,348 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T08:24:45,348 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T08:24:45,348 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T08:24:45,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:24:45,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:24:45,349 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:24:45,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:24:45,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:24:45,350 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:24:45,350 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:24:45,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742033_1209 (size=131440) 2024-11-23T08:24:45,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742033_1209 (size=131440) 2024-11-23T08:24:45,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742033_1209 (size=131440) 2024-11-23T08:24:45,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742034_1210 (size=4188619) 2024-11-23T08:24:45,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742034_1210 (size=4188619) 2024-11-23T08:24:45,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742034_1210 (size=4188619) 2024-11-23T08:24:45,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742035_1211 (size=1323991) 2024-11-23T08:24:45,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742035_1211 (size=1323991) 2024-11-23T08:24:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742035_1211 (size=1323991) 2024-11-23T08:24:45,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742036_1212 (size=903733) 2024-11-23T08:24:45,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742036_1212 (size=903733) 2024-11-23T08:24:45,545 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742036_1212 (size=903733) 2024-11-23T08:24:45,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742037_1213 (size=8360083) 2024-11-23T08:24:45,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742037_1213 (size=8360083) 2024-11-23T08:24:45,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742037_1213 (size=8360083) 2024-11-23T08:24:45,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742038_1214 (size=1877034) 2024-11-23T08:24:45,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742038_1214 (size=1877034) 2024-11-23T08:24:45,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742038_1214 (size=1877034) 2024-11-23T08:24:45,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742039_1215 (size=77835) 2024-11-23T08:24:45,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742039_1215 (size=77835) 2024-11-23T08:24:45,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742039_1215 (size=77835) 2024-11-23T08:24:45,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742040_1216 (size=30949) 2024-11-23T08:24:45,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742040_1216 (size=30949) 2024-11-23T08:24:45,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742040_1216 (size=30949) 2024-11-23T08:24:45,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742041_1217 (size=6424742) 2024-11-23T08:24:45,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742041_1217 (size=6424742) 2024-11-23T08:24:45,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742041_1217 (size=6424742) 2024-11-23T08:24:45,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742042_1218 (size=1597347) 2024-11-23T08:24:45,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742042_1218 (size=1597347) 2024-11-23T08:24:45,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742042_1218 (size=1597347) 2024-11-23T08:24:45,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742043_1219 (size=4695811) 2024-11-23T08:24:45,780 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742043_1219 (size=4695811) 2024-11-23T08:24:45,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742043_1219 (size=4695811) 2024-11-23T08:24:45,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742044_1220 (size=232957) 2024-11-23T08:24:45,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742044_1220 (size=232957) 2024-11-23T08:24:45,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742044_1220 (size=232957) 2024-11-23T08:24:45,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742045_1221 (size=127628) 2024-11-23T08:24:45,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742045_1221 (size=127628) 2024-11-23T08:24:45,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742045_1221 (size=127628) 2024-11-23T08:24:45,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742046_1222 (size=20406) 2024-11-23T08:24:45,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742046_1222 (size=20406) 2024-11-23T08:24:45,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742046_1222 (size=20406) 2024-11-23T08:24:45,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742047_1223 (size=5175431) 2024-11-23T08:24:45,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742047_1223 (size=5175431) 2024-11-23T08:24:45,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742047_1223 (size=5175431) 2024-11-23T08:24:45,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742048_1224 (size=440957) 2024-11-23T08:24:45,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742048_1224 (size=440957) 2024-11-23T08:24:45,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742048_1224 (size=440957) 2024-11-23T08:24:45,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742049_1225 (size=217634) 2024-11-23T08:24:45,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742049_1225 (size=217634) 2024-11-23T08:24:45,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742049_1225 (size=217634) 2024-11-23T08:24:45,953 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742050_1226 (size=1832290) 2024-11-23T08:24:45,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742050_1226 (size=1832290) 2024-11-23T08:24:45,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742050_1226 (size=1832290) 2024-11-23T08:24:45,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742051_1227 (size=322274) 2024-11-23T08:24:45,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742051_1227 (size=322274) 2024-11-23T08:24:45,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742051_1227 (size=322274) 2024-11-23T08:24:45,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742052_1228 (size=503880) 2024-11-23T08:24:45,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742052_1228 (size=503880) 2024-11-23T08:24:45,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742052_1228 (size=503880) 2024-11-23T08:24:45,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742053_1229 (size=29229) 2024-11-23T08:24:45,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742053_1229 (size=29229) 2024-11-23T08:24:45,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742053_1229 (size=29229) 2024-11-23T08:24:45,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742054_1230 (size=24096) 2024-11-23T08:24:45,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742054_1230 (size=24096) 2024-11-23T08:24:45,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742054_1230 (size=24096) 2024-11-23T08:24:46,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742055_1231 (size=111872) 2024-11-23T08:24:46,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742055_1231 (size=111872) 2024-11-23T08:24:46,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742055_1231 (size=111872) 2024-11-23T08:24:46,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742056_1232 (size=45609) 2024-11-23T08:24:46,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742056_1232 (size=45609) 2024-11-23T08:24:46,011 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742056_1232 (size=45609) 2024-11-23T08:24:46,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742057_1233 (size=136454) 2024-11-23T08:24:46,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742057_1233 (size=136454) 2024-11-23T08:24:46,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742057_1233 (size=136454) 2024-11-23T08:24:46,020 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T08:24:46,023 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-11-23T08:24:46,025 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-11-23T08:24:46,026 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-11-23T08:24:46,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742058_1234 (size=447) 2024-11-23T08:24:46,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742058_1234 (size=447) 2024-11-23T08:24:46,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742058_1234 (size=447) 2024-11-23T08:24:46,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742059_1235 (size=21) 2024-11-23T08:24:46,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742059_1235 (size=21) 2024-11-23T08:24:46,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742059_1235 (size=21) 2024-11-23T08:24:46,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742060_1236 (size=304008) 2024-11-23T08:24:46,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742060_1236 (size=304008) 2024-11-23T08:24:46,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742060_1236 (size=304008) 2024-11-23T08:24:46,126 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0003_000001 (auth:SIMPLE) from 127.0.0.1:53034 2024-11-23T08:24:46,135 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T08:24:46,135 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T08:24:46,171 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0003/container_1732350113698_0003_01_000001/launch_container.sh] 2024-11-23T08:24:46,171 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0003/container_1732350113698_0003_01_000001/container_tokens] 2024-11-23T08:24:46,171 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0003/container_1732350113698_0003_01_000001/sysfs] 2024-11-23T08:24:46,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-23T08:24:46,247 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-23T08:24:46,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-23T08:24:46,249 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-23T08:24:46,967 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0004_000001 (auth:SIMPLE) from 127.0.0.1:37674 2024-11-23T08:24:47,584 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:24:50,310 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ee2fd65d63bd4e9f2bc75773c639a445 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:24:50,310 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ef38f1c77fb1f2ee154421c6efb858f6 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:24:53,709 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0004_000001 (auth:SIMPLE) from 127.0.0.1:50496 2024-11-23T08:24:54,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742061_1237 (size=349706) 2024-11-23T08:24:54,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742061_1237 (size=349706) 
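[editor's note] The "For class X, using jar Y" entries earlier in this run, followed by the job-resource upload and the burst of addStoredBlock messages, correspond to the MapReduce job staging its dependency jars into HDFS before the application attempt above is launched. As a minimal, hypothetical sketch (job name and setup are illustrative, not taken from this test), the client side of that step typically looks like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSketch {
  public static Job newJobWithDependencyJars() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-sketch"); // name is illustrative
    // Resolves the jar containing each class the job uses (input/output formats,
    // writables, partitioner, ...) and adds it to the distributed cache, which is
    // what produces the "For class X, using jar Y" log entries.
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}

[end editor's note]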
2024-11-23T08:24:54,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742061_1237 (size=349706) 2024-11-23T08:24:56,415 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0004_000001 (auth:SIMPLE) from 127.0.0.1:45122 2024-11-23T08:24:56,417 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0004_000001 (auth:SIMPLE) from 127.0.0.1:56570 2024-11-23T08:25:02,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742062_1238 (size=8324) 2024-11-23T08:25:02,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742062_1238 (size=8324) 2024-11-23T08:25:02,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742062_1238 (size=8324) 2024-11-23T08:25:02,901 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_0/usercache/jenkins/appcache/application_1732350113698_0004/container_1732350113698_0004_01_000002/launch_container.sh] 2024-11-23T08:25:02,901 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_0/usercache/jenkins/appcache/application_1732350113698_0004/container_1732350113698_0004_01_000002/container_tokens] 2024-11-23T08:25:02,901 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_0/usercache/jenkins/appcache/application_1732350113698_0004/container_1732350113698_0004_01_000002/sysfs] 2024-11-23T08:25:04,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742064_1240 (size=5286) 2024-11-23T08:25:04,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742064_1240 (size=5286) 2024-11-23T08:25:04,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742064_1240 (size=5286) 2024-11-23T08:25:04,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742063_1239 (size=22168) 2024-11-23T08:25:04,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742063_1239 (size=22168) 2024-11-23T08:25:04,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742063_1239 (size=22168) 2024-11-23T08:25:04,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742065_1241 
(size=466) 2024-11-23T08:25:04,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742065_1241 (size=466) 2024-11-23T08:25:04,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742065_1241 (size=466) 2024-11-23T08:25:04,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742066_1242 (size=22168) 2024-11-23T08:25:04,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742066_1242 (size=22168) 2024-11-23T08:25:04,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742066_1242 (size=22168) 2024-11-23T08:25:04,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742067_1243 (size=349706) 2024-11-23T08:25:04,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742067_1243 (size=349706) 2024-11-23T08:25:04,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742067_1243 (size=349706) 2024-11-23T08:25:04,306 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0004_000001 (auth:SIMPLE) from 127.0.0.1:41528 2024-11-23T08:25:04,316 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732350113698_0004_01_000003 is : 143 2024-11-23T08:25:04,330 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_2/usercache/jenkins/appcache/application_1732350113698_0004/container_1732350113698_0004_01_000003/launch_container.sh] 2024-11-23T08:25:04,330 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_2/usercache/jenkins/appcache/application_1732350113698_0004/container_1732350113698_0004_01_000003/container_tokens] 2024-11-23T08:25:04,330 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_2/usercache/jenkins/appcache/application_1732350113698_0004/container_1732350113698_0004_01_000003/sysfs] 2024-11-23T08:25:05,381 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T08:25:05,383 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
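[editor's note] The sequence above (loading the snapshot hfile list, computing export splits, running the MR containers, then "Finalize the Snapshot Export" and the integrity check) is the ExportSnapshot flow. A minimal sketch of how such an export is usually driven, assuming an illustrative destination path and mapper count (only the snapshot name appears in this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Runs the ExportSnapshot tool: it loads the snapshot manifest, splits the
    // referenced hfiles across mappers, copies them, then verifies the export.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportFileSystemState",
        "--copy-to", "hdfs://namenode:8020/export-test", // destination is illustrative
        "--mappers", "2"                                  // two splits, as logged above
    });
    System.exit(rc);
  }
}

[end editor's note]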
2024-11-23T08:25:05,400 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-11-23T08:25:05,400 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T08:25:05,401 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T08:25:05,401 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-23T08:25:05,402 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-23T08:25:05,402 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-23T08:25:05,402 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350284087/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350284087/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-23T08:25:05,402 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350284087/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-23T08:25:05,402 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350284087/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-23T08:25:05,410 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-11-23T08:25:05,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-11-23T08:25:05,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-23T08:25:05,415 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350305414"}]},"ts":"1732350305414"} 2024-11-23T08:25:05,417 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-11-23T08:25:05,417 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-11-23T08:25:05,418 INFO [PEWorker-4 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-11-23T08:25:05,419 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ef38f1c77fb1f2ee154421c6efb858f6, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ee2fd65d63bd4e9f2bc75773c639a445, UNASSIGN}] 2024-11-23T08:25:05,421 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ee2fd65d63bd4e9f2bc75773c639a445, UNASSIGN 2024-11-23T08:25:05,421 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ef38f1c77fb1f2ee154421c6efb858f6, UNASSIGN 2024-11-23T08:25:05,421 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=ee2fd65d63bd4e9f2bc75773c639a445, regionState=CLOSING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:25:05,421 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=ef38f1c77fb1f2ee154421c6efb858f6, regionState=CLOSING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:25:05,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ee2fd65d63bd4e9f2bc75773c639a445, UNASSIGN because future has completed 2024-11-23T08:25:05,427 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:25:05,427 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:25:05,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ef38f1c77fb1f2ee154421c6efb858f6, UNASSIGN because future has completed 2024-11-23T08:25:05,429 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:25:05,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:25:05,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-23T08:25:05,581 INFO 
[RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:25:05,582 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:25:05,582 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing ee2fd65d63bd4e9f2bc75773c639a445, disabling compactions & flushes 2024-11-23T08:25:05,582 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 2024-11-23T08:25:05,582 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 2024-11-23T08:25:05,582 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:25:05,582 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. after waiting 0 ms 2024-11-23T08:25:05,582 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 2024-11-23T08:25:05,582 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:25:05,582 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing ef38f1c77fb1f2ee154421c6efb858f6, disabling compactions & flushes 2024-11-23T08:25:05,582 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 2024-11-23T08:25:05,582 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 2024-11-23T08:25:05,582 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. after waiting 0 ms 2024-11-23T08:25:05,583 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 
2024-11-23T08:25:05,587 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:25:05,588 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:25:05,588 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:25:05,588 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:25:05,588 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6. 2024-11-23T08:25:05,588 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for ef38f1c77fb1f2ee154421c6efb858f6: Waiting for close lock at 1732350305582Running coprocessor pre-close hooks at 1732350305582Disabling compacts and flushes for region at 1732350305582Disabling writes for close at 1732350305583 (+1 ms)Writing region close event to WAL at 1732350305583Running coprocessor post-close hooks at 1732350305588 (+5 ms)Closed at 1732350305588 2024-11-23T08:25:05,589 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445. 
2024-11-23T08:25:05,589 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for ee2fd65d63bd4e9f2bc75773c639a445: Waiting for close lock at 1732350305582Running coprocessor pre-close hooks at 1732350305582Disabling compacts and flushes for region at 1732350305582Disabling writes for close at 1732350305582Writing region close event to WAL at 1732350305583 (+1 ms)Running coprocessor post-close hooks at 1732350305588 (+5 ms)Closed at 1732350305589 (+1 ms) 2024-11-23T08:25:05,591 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:25:05,592 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=ef38f1c77fb1f2ee154421c6efb858f6, regionState=CLOSED 2024-11-23T08:25:05,593 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=ee2fd65d63bd4e9f2bc75773c639a445, regionState=CLOSED 2024-11-23T08:25:05,594 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:25:05,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:25:05,598 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:25:05,603 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=109 2024-11-23T08:25:05,604 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-11-23T08:25:05,604 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure ee2fd65d63bd4e9f2bc75773c639a445, server=b33d9013c60a,39649,1732350107049 in 175 msec 2024-11-23T08:25:05,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure ef38f1c77fb1f2ee154421c6efb858f6, server=b33d9013c60a,43417,1732350106962 in 171 msec 2024-11-23T08:25:05,605 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ef38f1c77fb1f2ee154421c6efb858f6, UNASSIGN in 184 msec 2024-11-23T08:25:05,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=110, resume processing ppid=108 2024-11-23T08:25:05,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ee2fd65d63bd4e9f2bc75773c639a445, UNASSIGN in 185 msec 2024-11-23T08:25:05,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-11-23T08:25:05,611 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 188 msec 2024-11-23T08:25:05,612 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350305612"}]},"ts":"1732350305612"} 2024-11-23T08:25:05,614 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-11-23T08:25:05,614 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-11-23T08:25:05,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 205 msec 2024-11-23T08:25:05,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-23T08:25:05,728 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T08:25:05,729 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-11-23T08:25:05,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T08:25:05,731 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T08:25:05,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-11-23T08:25:05,732 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T08:25:05,734 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-11-23T08:25:05,735 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:25:05,735 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:25:05,737 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/cf, FileablePath, 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/recovered.edits] 2024-11-23T08:25:05,737 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/recovered.edits] 2024-11-23T08:25:05,740 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/cf/ee4d1f71c47f468e9121b44fa33a34df to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/cf/ee4d1f71c47f468e9121b44fa33a34df 2024-11-23T08:25:05,741 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/cf/89d27fe9c7774e85a7b1f5bb7cd15f98 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/cf/89d27fe9c7774e85a7b1f5bb7cd15f98 2024-11-23T08:25:05,743 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6/recovered.edits/9.seqid 2024-11-23T08:25:05,743 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ef38f1c77fb1f2ee154421c6efb858f6 2024-11-23T08:25:05,743 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445/recovered.edits/9.seqid 2024-11-23T08:25:05,744 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemState/ee2fd65d63bd4e9f2bc75773c639a445 2024-11-23T08:25:05,744 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-11-23T08:25:05,746 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T08:25:05,749 WARN 
[PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-11-23T08:25:05,752 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-11-23T08:25:05,753 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T08:25:05,753 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-11-23T08:25:05,754 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350305753"}]},"ts":"9223372036854775807"} 2024-11-23T08:25:05,754 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350305753"}]},"ts":"9223372036854775807"} 2024-11-23T08:25:05,756 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T08:25:05,756 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ef38f1c77fb1f2ee154421c6efb858f6, NAME => 'testtb-testExportFileSystemState,,1732350282704.ef38f1c77fb1f2ee154421c6efb858f6.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ee2fd65d63bd4e9f2bc75773c639a445, NAME => 'testtb-testExportFileSystemState,1,1732350282704.ee2fd65d63bd4e9f2bc75773c639a445.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T08:25:05,756 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 
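[editor's note] The DisableTableProcedure, region UNASSIGN/Close procedures, HFileArchiver activity and DeleteTableProcedure above, plus the snapshot deletions that follow, are the master-side effects of the test's teardown. A hypothetical client-side sketch of the calls that trigger this cleanup, assuming default connection configuration (not the test's actual code):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);   // drives DisableTableProcedure / region close
      admin.deleteTable(table);    // drives DeleteTableProcedure / HFile archiving
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testExportFileSystemState");
    }
  }
}

[end editor's note]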
2024-11-23T08:25:05,757 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732350305757"}]},"ts":"9223372036854775807"} 2024-11-23T08:25:05,759 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-11-23T08:25:05,759 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T08:25:05,760 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 30 msec 2024-11-23T08:25:05,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T08:25:05,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T08:25:05,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T08:25:05,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T08:25:05,785 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-23T08:25:05,785 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-23T08:25:05,785 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-23T08:25:05,785 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-23T08:25:05,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T08:25:05,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T08:25:05,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T08:25:05,792 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T08:25:05,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:05,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:05,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:05,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:05,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-11-23T08:25:05,794 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-11-23T08:25:05,794 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T08:25:05,800 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-11-23T08:25:05,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-11-23T08:25:05,804 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-11-23T08:25:05,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-11-23T08:25:05,825 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=808 (was 799) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3706 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: process reaper (pid 121757) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:56708 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:39589 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:58256 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-902273464_1 at /127.0.0.1:56694 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39589 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-902273464_1 at /127.0.0.1:58234 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:56876 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending 
Thread for localhost/127.0.0.1:42125 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=811 (was 815), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=768 (was 768), ProcessCount=18 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=3206 (was 3297) 2024-11-23T08:25:05,825 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-11-23T08:25:05,843 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=808, OpenFileDescriptor=811, MaxFileDescriptor=1048576, SystemLoadAverage=768, ProcessCount=18, AvailableMemoryMB=3206 2024-11-23T08:25:05,843 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-11-23T08:25:05,844 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:25:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-11-23T08:25:05,846 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:25:05,847 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:25:05,847 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-11-23T08:25:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-23T08:25:05,848 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:25:05,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742068_1244 (size=404) 2024-11-23T08:25:05,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742068_1244 (size=404) 2024-11-23T08:25:05,854 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742068_1244 (size=404) 2024-11-23T08:25:05,856 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0e68eb80531b116ca00181dd63104595, NAME => 'testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:25:05,856 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 39d2d9135938bc1dceb526423ba88242, NAME => 'testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:25:05,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742069_1245 (size=65) 2024-11-23T08:25:05,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742070_1246 (size=65) 2024-11-23T08:25:05,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742070_1246 (size=65) 2024-11-23T08:25:05,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742070_1246 (size=65) 2024-11-23T08:25:05,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742069_1245 (size=65) 2024-11-23T08:25:05,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742069_1245 (size=65) 2024-11-23T08:25:05,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:25:05,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect 
now disable 2024-11-23T08:25:05,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 0e68eb80531b116ca00181dd63104595, disabling compactions & flushes 2024-11-23T08:25:05,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 39d2d9135938bc1dceb526423ba88242, disabling compactions & flushes 2024-11-23T08:25:05,872 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:05,872 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 2024-11-23T08:25:05,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:05,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 2024-11-23T08:25:05,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. after waiting 0 ms 2024-11-23T08:25:05,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. after waiting 0 ms 2024-11-23T08:25:05,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:05,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 2024-11-23T08:25:05,872 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:05,872 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 
2024-11-23T08:25:05,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 39d2d9135938bc1dceb526423ba88242: Waiting for close lock at 1732350305872Disabling compacts and flushes for region at 1732350305872Disabling writes for close at 1732350305872Writing region close event to WAL at 1732350305872Closed at 1732350305872 2024-11-23T08:25:05,872 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0e68eb80531b116ca00181dd63104595: Waiting for close lock at 1732350305872Disabling compacts and flushes for region at 1732350305872Disabling writes for close at 1732350305872Writing region close event to WAL at 1732350305872Closed at 1732350305872 2024-11-23T08:25:05,874 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:25:05,875 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732350305874"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350305874"}]},"ts":"1732350305874"} 2024-11-23T08:25:05,875 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732350305874"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350305874"}]},"ts":"1732350305874"} 2024-11-23T08:25:05,877 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
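[Editor's note, not part of the log] The CreateTableProcedure above (pid=114) is building 'testtb-testConsecutiveExports' with a single 'cf' family (VERSIONS => '1') and two regions split at '1'. The following is a hypothetical client-side sketch of an equivalent create call, assuming the standard HBase 2.x+ Admin API; it is not taken from the test source, only the table name, family name, and split key come from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
          // VERSIONS => '1' in the descriptor logged by HMaster; other attributes keep defaults
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build())
          .build();
      // A single split key '1' yields the two regions logged above: ['', '1') and ['1', '')
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(desc, splitKeys);
    }
  }
}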
2024-11-23T08:25:05,878 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:25:05,878 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350305878"}]},"ts":"1732350305878"} 2024-11-23T08:25:05,880 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-11-23T08:25:05,881 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:25:05,882 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:25:05,882 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:25:05,882 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:25:05,882 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:25:05,882 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:25:05,882 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:25:05,882 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:25:05,882 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:25:05,882 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:25:05,882 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:25:05,882 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0e68eb80531b116ca00181dd63104595, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=39d2d9135938bc1dceb526423ba88242, ASSIGN}] 2024-11-23T08:25:05,884 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=39d2d9135938bc1dceb526423ba88242, ASSIGN 2024-11-23T08:25:05,884 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0e68eb80531b116ca00181dd63104595, ASSIGN 2024-11-23T08:25:05,885 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0e68eb80531b116ca00181dd63104595, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 2024-11-23T08:25:05,885 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=39d2d9135938bc1dceb526423ba88242, ASSIGN; state=OFFLINE, location=b33d9013c60a,45199,1732350106787; forceNewPlan=false, retain=false 2024-11-23T08:25:05,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-23T08:25:06,036 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-23T08:25:06,036 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=39d2d9135938bc1dceb526423ba88242, regionState=OPENING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:25:06,042 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=0e68eb80531b116ca00181dd63104595, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:25:06,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=39d2d9135938bc1dceb526423ba88242, ASSIGN because future has completed 2024-11-23T08:25:06,053 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 39d2d9135938bc1dceb526423ba88242, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:25:06,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0e68eb80531b116ca00181dd63104595, ASSIGN because future has completed 2024-11-23T08:25:06,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e68eb80531b116ca00181dd63104595, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:25:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-23T08:25:06,208 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:06,208 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => 0e68eb80531b116ca00181dd63104595, NAME => 'testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T08:25:06,208 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. service=AccessControlService 2024-11-23T08:25:06,208 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:25:06,209 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,209 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:25:06,209 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,209 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,209 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 2024-11-23T08:25:06,209 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 39d2d9135938bc1dceb526423ba88242, NAME => 'testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T08:25:06,210 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. service=AccessControlService 2024-11-23T08:25:06,210 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:25:06,210 INFO [StoreOpener-0e68eb80531b116ca00181dd63104595-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,210 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,210 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:25:06,210 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,210 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,212 INFO [StoreOpener-0e68eb80531b116ca00181dd63104595-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0e68eb80531b116ca00181dd63104595 columnFamilyName cf 2024-11-23T08:25:06,212 DEBUG [StoreOpener-0e68eb80531b116ca00181dd63104595-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:25:06,212 INFO [StoreOpener-39d2d9135938bc1dceb526423ba88242-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,212 INFO [StoreOpener-0e68eb80531b116ca00181dd63104595-1 {}] regionserver.HStore(327): Store=0e68eb80531b116ca00181dd63104595/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:25:06,212 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,213 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,213 INFO [StoreOpener-39d2d9135938bc1dceb526423ba88242-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 39d2d9135938bc1dceb526423ba88242 columnFamilyName cf 2024-11-23T08:25:06,213 DEBUG [StoreOpener-39d2d9135938bc1dceb526423ba88242-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:25:06,213 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,214 INFO [StoreOpener-39d2d9135938bc1dceb526423ba88242-1 {}] regionserver.HStore(327): Store=39d2d9135938bc1dceb526423ba88242/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:25:06,214 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,214 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,214 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,214 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,215 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,215 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,215 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,215 DEBUG 
[RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,217 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,217 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:25:06,217 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened 0e68eb80531b116ca00181dd63104595; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67579948, jitterRate=0.007019698619842529}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:25:06,217 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,218 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for 0e68eb80531b116ca00181dd63104595: Running coprocessor pre-open hook at 1732350306209Writing region info on filesystem at 1732350306209Initializing all the Stores at 1732350306210 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350306210Cleaning up temporary data from old regions at 1732350306214 (+4 ms)Running coprocessor post-open hooks at 1732350306217 (+3 ms)Region opened successfully at 1732350306218 (+1 ms) 2024-11-23T08:25:06,219 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:25:06,219 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595., pid=118, masterSystemTime=1732350306205 2024-11-23T08:25:06,219 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 39d2d9135938bc1dceb526423ba88242; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65392477, jitterRate=-0.025576159358024597}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:25:06,220 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 
39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,220 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 39d2d9135938bc1dceb526423ba88242: Running coprocessor pre-open hook at 1732350306211Writing region info on filesystem at 1732350306211Initializing all the Stores at 1732350306212 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350306212Cleaning up temporary data from old regions at 1732350306215 (+3 ms)Running coprocessor post-open hooks at 1732350306220 (+5 ms)Region opened successfully at 1732350306220 2024-11-23T08:25:06,221 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242., pid=117, masterSystemTime=1732350306205 2024-11-23T08:25:06,221 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:06,221 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:06,222 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=0e68eb80531b116ca00181dd63104595, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:25:06,222 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 2024-11-23T08:25:06,222 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 
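[Editor's note, not part of the log] Both OpenRegionProcedures have reported back at this point, and the test utility later waits for the assignment to become visible (see the HBaseTestingUtil lines further down). A hypothetical way to perform a similar check from ordinary client code, assuming the RegionLocator API, might be the fragment below; the helper name and its surrounding structure are illustrative only.

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionAssignmentCheck {
  // 'connection' is assumed to be an open Connection to the cluster in this log
  static void assertAllRegionsAssigned(Connection connection) throws Exception {
    try (RegionLocator locator =
             connection.getRegionLocator(TableName.valueOf("testtb-testConsecutiveExports"))) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation location : locations) {
        // A null server name means the region has no open location yet
        if (location.getServerName() == null) {
          throw new IllegalStateException("Region not yet assigned: " + location.getRegion());
        }
      }
    }
  }
}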
2024-11-23T08:25:06,223 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=39d2d9135938bc1dceb526423ba88242, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:25:06,224 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0e68eb80531b116ca00181dd63104595, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:25:06,224 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 39d2d9135938bc1dceb526423ba88242, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:25:06,227 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=115 2024-11-23T08:25:06,227 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 0e68eb80531b116ca00181dd63104595, server=b33d9013c60a,39649,1732350107049 in 171 msec 2024-11-23T08:25:06,228 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0e68eb80531b116ca00181dd63104595, ASSIGN in 345 msec 2024-11-23T08:25:06,228 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=116 2024-11-23T08:25:06,228 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure 39d2d9135938bc1dceb526423ba88242, server=b33d9013c60a,45199,1732350106787 in 175 msec 2024-11-23T08:25:06,230 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=114 2024-11-23T08:25:06,231 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=39d2d9135938bc1dceb526423ba88242, ASSIGN in 346 msec 2024-11-23T08:25:06,231 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:25:06,231 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350306231"}]},"ts":"1732350306231"} 2024-11-23T08:25:06,233 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-11-23T08:25:06,234 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:25:06,234 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-11-23T08:25:06,237 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 
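[Editor's note, not part of the log] The PermissionStorage entry just below ("testtb-testConsecutiveExports jenkins: RWXCA") is written automatically by the AccessController during CREATE_TABLE_POST_OPERATION, giving the creating user full rights. A hypothetical explicit grant with the same effect, assuming the AccessControlClient API, could look like this; only the table name, user name, and R/W/X/C/A actions come from the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTableAccess {
  // 'connection' is assumed to be an open Connection; AccessControlClient.grant declares 'throws Throwable'
  static void grantFullTableAccess(Connection connection) throws Throwable {
    AccessControlClient.grant(connection,
        TableName.valueOf("testtb-testConsecutiveExports"),
        "jenkins",            // user named in the acl entry below
        null, null,           // no family/qualifier restriction: table-wide grant
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);   // R, W, X, C, A
  }
}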
2024-11-23T08:25:06,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-23T08:25:06,247 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-11-23T08:25:06,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-23T08:25:06,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:06,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:06,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:06,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:06,281 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:06,281 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:06,281 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:06,282 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:06,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 437 msec 2024-11-23T08:25:06,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-23T08:25:06,478 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T08:25:06,478 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all 
regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-11-23T08:25:06,478 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:25:06,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-11-23T08:25:06,483 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:25:06,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testConsecutiveExports assigned. 2024-11-23T08:25:06,483 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T08:25:06,486 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-23T08:25:06,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350306486 (current time:1732350306486). 2024-11-23T08:25:06,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:25:06,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-23T08:25:06,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:25:06,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42339fdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:06,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:25:06,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:25:06,488 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:25:06,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:25:06,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:25:06,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@345db04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-11-23T08:25:06,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:25:06,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:25:06,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:06,490 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50292, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:25:06,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@261f9c94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:06,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:25:06,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:25:06,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:06,494 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52740, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:25:06,495 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 
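[Editor's note, not part of the log] The master has accepted a snapshot request for 'emptySnaptb0-testConsecutiveExports' of type FLUSH on the newly created table (see the MasterRpcServices line above). A hypothetical client-side call that issues such a request, assuming the Admin snapshot API, is sketched below; the snapshot and table names come from the log, the wrapper method is illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class TakeEmptySnapshot {
  // 'connection' is assumed to be an open Connection to the cluster in this log
  static void takeEmptySnapshot(Connection connection) throws Exception {
    try (Admin admin = connection.getAdmin()) {
      // The request above shows "type=FLUSH ttl=0"; for an enabled table this overload
      // takes a flush snapshot without an explicit SnapshotType argument.
      admin.snapshot("emptySnaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}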
2024-11-23T08:25:06,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:25:06,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:06,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:06,495 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:25:06,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@255eb63, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:06,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:25:06,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:25:06,497 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:25:06,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:25:06,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:25:06,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63b418fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:06,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:25:06,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:25:06,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:06,499 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50316, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:25:06,500 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b749bde, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:06,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:25:06,501 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:25:06,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:06,503 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52756, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:25:06,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:25:06,506 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:06,507 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32932, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:25:06,508 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 
2024-11-23T08:25:06,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:25:06,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:06,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:06,508 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:25:06,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-23T08:25:06,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
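Editor's note: the snapshot request recorded above ({ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }) is what the master receives when a client invokes the HBase Admin snapshot API. The actual test code is not part of this log, so the following is only a minimal sketch of such a call, assuming the standard synchronous Admin API; only the table and snapshot names are taken from the log.

```java
// Minimal sketch (not the actual test code) of the client call behind the
// "snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports ... type=FLUSH ttl=0 }" entry above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class EmptySnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot: online regions are flushed (still empty at this point),
      // then region manifests are written, matching the procedure states below.
      admin.snapshot("emptySnaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"),
          SnapshotType.FLUSH);
    }
  }
}
```

The blocking call returns once the master-side SnapshotProcedure (pid=119 below) finishes, which is why the master keeps answering "Checking to see if procedure is done pid=119" while it runs.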
2024-11-23T08:25:06,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-23T08:25:06,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-11-23T08:25:06,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-23T08:25:06,512 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:25:06,513 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:25:06,516 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:25:06,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742071_1247 (size=161) 2024-11-23T08:25:06,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742071_1247 (size=161) 2024-11-23T08:25:06,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742071_1247 (size=161) 2024-11-23T08:25:06,526 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:25:06,526 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e68eb80531b116ca00181dd63104595}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39d2d9135938bc1dceb526423ba88242}] 2024-11-23T08:25:06,528 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,528 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,617 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-23T08:25:06,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45199 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-11-23T08:25:06,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-11-23T08:25:06,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 2024-11-23T08:25:06,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for 39d2d9135938bc1dceb526423ba88242: 2024-11-23T08:25:06,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. for emptySnaptb0-testConsecutiveExports completed. 2024-11-23T08:25:06,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-23T08:25:06,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:25:06,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:25:06,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:06,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 0e68eb80531b116ca00181dd63104595: 2024-11-23T08:25:06,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. for emptySnaptb0-testConsecutiveExports completed. 2024-11-23T08:25:06,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-23T08:25:06,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:25:06,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:25:06,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742072_1248 (size=68) 2024-11-23T08:25:06,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742073_1249 (size=68) 2024-11-23T08:25:06,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742072_1248 (size=68) 2024-11-23T08:25:06,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742072_1248 (size=68) 2024-11-23T08:25:06,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742073_1249 (size=68) 2024-11-23T08:25:06,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742073_1249 (size=68) 2024-11-23T08:25:06,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:06,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-11-23T08:25:06,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 
2024-11-23T08:25:06,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-23T08:25:06,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-11-23T08:25:06,703 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,703 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-11-23T08:25:06,704 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,704 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,706 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0e68eb80531b116ca00181dd63104595 in 178 msec 2024-11-23T08:25:06,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=119 2024-11-23T08:25:06,709 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:25:06,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 39d2d9135938bc1dceb526423ba88242 in 179 msec 2024-11-23T08:25:06,710 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:25:06,711 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:25:06,711 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-11-23T08:25:06,712 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-11-23T08:25:06,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37117 is added to blk_1073742074_1250 (size=543) 2024-11-23T08:25:06,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742074_1250 (size=543) 2024-11-23T08:25:06,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742074_1250 (size=543) 2024-11-23T08:25:06,731 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:25:06,746 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:25:06,747 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-11-23T08:25:06,749 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:25:06,750 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-11-23T08:25:06,752 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 241 msec 2024-11-23T08:25:06,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-23T08:25:06,828 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T08:25:06,832 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='096ccd250a230584d3e807d58ca78d863', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:25:06,833 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='194c528aaccfe87edff7b81af7e1c6f02', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 
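Editor's note: after the "Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed" message above, the finished snapshot can be confirmed from the client side. This verification step is not shown in the log; the sketch below assumes the standard Admin listing API, and only the snapshot name comes from the log.

```java
// Hypothetical verification step: list snapshots and check the new one is present.
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  static boolean snapshotExists(Admin admin, String name) throws Exception {
    for (SnapshotDescription sd : admin.listSnapshots()) {
      if (sd.getName().equals(name)) {   // e.g. "emptySnaptb0-testConsecutiveExports"
        return true;
      }
    }
    return false;
  }
}
```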
2024-11-23T08:25:06,834 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='2bbc2c5c6844d439d9d2e352512dae921', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:25:06,839 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39649 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:25:06,840 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='3bc16e8a06e38913c1db0d0be2934e18f', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:25:06,841 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:25:06,841 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='4028aef42ac2cee3a889676f1cd7d2fb0', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:25:06,842 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T08:25:06,845 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-23T08:25:06,846 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:06,846 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:25:06,848 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T08:25:06,855 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T08:25:06,861 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T08:25:06,863 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-23T08:25:06,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350306863 (current time:1732350306863). 
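Editor's note: the "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." messages above are what a region server logs when a client writes with WAL durability turned off. A minimal sketch of such a write follows; the family "cf" and qualifier "q" appear later in the flush output ("key is .../cf:q/..."), while the row and value bytes here are invented.

```java
// Sketch of the kind of write that triggers the "WAL disabled" warnings above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  static void putRow(Connection conn, byte[] row) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
      Put put = new Put(row);
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL); // skip the WAL, hence "Data may be lost..."
      table.put(put);
    }
  }
}
```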
2024-11-23T08:25:06,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:25:06,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-23T08:25:06,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:25:06,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40a7cccb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:06,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:25:06,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:25:06,865 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:25:06,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:25:06,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:25:06,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ff5ddf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:06,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:25:06,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:25:06,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:06,866 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50340, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:25:06,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2408db2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:06,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:25:06,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:25:06,869 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:06,870 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52766, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:25:06,872 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 2024-11-23T08:25:06,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:25:06,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:06,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:06,872 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T08:25:06,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@585d7072, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:06,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:25:06,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:25:06,874 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:25:06,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:25:06,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:25:06,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@175c59f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:06,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:25:06,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:25:06,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:06,876 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50368, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:25:06,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47cae6d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:06,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:25:06,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:25:06,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:06,880 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52770, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T08:25:06,882 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:25:06,882 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:06,883 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32940, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:25:06,885 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 2024-11-23T08:25:06,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:25:06,885 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:06,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:06,885 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:25:06,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-23T08:25:06,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:25:06,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-23T08:25:06,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-11-23T08:25:06,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-23T08:25:06,889 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:25:06,890 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:25:06,893 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:25:06,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742075_1251 (size=156) 2024-11-23T08:25:06,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742075_1251 (size=156) 2024-11-23T08:25:06,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742075_1251 (size=156) 2024-11-23T08:25:06,918 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:25:06,918 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e68eb80531b116ca00181dd63104595}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39d2d9135938bc1dceb526423ba88242}] 2024-11-23T08:25:06,920 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:06,920 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:06,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-23T08:25:07,073 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-11-23T08:25:07,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45199 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-11-23T08:25:07,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:07,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 
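Editor's note: the repeated "Checking to see if procedure is done pid=122" entries above are the master answering completion polls from the client while the two SnapshotRegionProcedure subprocedures run. With the asynchronous Admin API the same polling can be written out explicitly; this is only a rough sketch (the sleep interval is invented), not the mechanism the test itself uses.

```java
// Sketch of explicit completion polling for the snapshot taken above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotPollSketch {
  static void takeAndWait(Admin admin) throws Exception {
    SnapshotDescription snap = new SnapshotDescription(
        "snaptb0-testConsecutiveExports",
        TableName.valueOf("testtb-testConsecutiveExports"),
        SnapshotType.FLUSH);
    admin.snapshotAsync(snap);              // submit the snapshot procedure
    while (!admin.isSnapshotFinished(snap)) {
      Thread.sleep(100);                    // poll until the SnapshotProcedure completes
    }
  }
}
```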
2024-11-23T08:25:07,075 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 0e68eb80531b116ca00181dd63104595 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-11-23T08:25:07,075 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing 39d2d9135938bc1dceb526423ba88242 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-11-23T08:25:07,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/.tmp/cf/d312f5e3c3d044bfa96dadf9638ad893 is 71, key is 111bd37aa3d9d2d049832c2c7fb330dd/cf:q/1732350306841/Put/seqid=0 2024-11-23T08:25:07,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/.tmp/cf/291a96c9d05e4fd99a30f21389012821 is 71, key is 00e568717fdc624a5b39ba7ef932e3cd/cf:q/1732350306838/Put/seqid=0 2024-11-23T08:25:07,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742076_1252 (size=8052) 2024-11-23T08:25:07,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742076_1252 (size=8052) 2024-11-23T08:25:07,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742076_1252 (size=8052) 2024-11-23T08:25:07,162 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.80 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/.tmp/cf/d312f5e3c3d044bfa96dadf9638ad893 2024-11-23T08:25:07,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/.tmp/cf/d312f5e3c3d044bfa96dadf9638ad893 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/cf/d312f5e3c3d044bfa96dadf9638ad893 2024-11-23T08:25:07,178 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/cf/d312f5e3c3d044bfa96dadf9638ad893, entries=43, sequenceid=6, filesize=7.9 K 2024-11-23T08:25:07,179 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize 
~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for 39d2d9135938bc1dceb526423ba88242 in 104ms, sequenceid=6, compaction requested=false 2024-11-23T08:25:07,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-23T08:25:07,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for 39d2d9135938bc1dceb526423ba88242: 2024-11-23T08:25:07,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. for snaptb0-testConsecutiveExports completed. 2024-11-23T08:25:07,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-23T08:25:07,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:25:07,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/cf/d312f5e3c3d044bfa96dadf9638ad893] hfiles 2024-11-23T08:25:07,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/cf/d312f5e3c3d044bfa96dadf9638ad893 for snapshot=snaptb0-testConsecutiveExports 2024-11-23T08:25:07,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742077_1253 (size=5566) 2024-11-23T08:25:07,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742077_1253 (size=5566) 2024-11-23T08:25:07,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742077_1253 (size=5566) 2024-11-23T08:25:07,190 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=467 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/.tmp/cf/291a96c9d05e4fd99a30f21389012821 2024-11-23T08:25:07,197 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/.tmp/cf/291a96c9d05e4fd99a30f21389012821 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/cf/291a96c9d05e4fd99a30f21389012821 2024-11-23T08:25:07,206 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/cf/291a96c9d05e4fd99a30f21389012821, entries=7, sequenceid=6, filesize=5.4 K 2024-11-23T08:25:07,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-23T08:25:07,208 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for 0e68eb80531b116ca00181dd63104595 in 132ms, sequenceid=6, compaction requested=false 2024-11-23T08:25:07,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for 0e68eb80531b116ca00181dd63104595: 2024-11-23T08:25:07,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. for snaptb0-testConsecutiveExports completed. 2024-11-23T08:25:07,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-23T08:25:07,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:25:07,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/cf/291a96c9d05e4fd99a30f21389012821] hfiles 2024-11-23T08:25:07,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/cf/291a96c9d05e4fd99a30f21389012821 for snapshot=snaptb0-testConsecutiveExports 2024-11-23T08:25:07,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742078_1254 (size=107) 2024-11-23T08:25:07,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742078_1254 (size=107) 2024-11-23T08:25:07,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742078_1254 (size=107) 2024-11-23T08:25:07,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 
2024-11-23T08:25:07,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-11-23T08:25:07,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-11-23T08:25:07,246 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:07,246 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:07,249 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 39d2d9135938bc1dceb526423ba88242 in 329 msec 2024-11-23T08:25:07,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742079_1255 (size=107) 2024-11-23T08:25:07,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742079_1255 (size=107) 2024-11-23T08:25:07,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742079_1255 (size=107) 2024-11-23T08:25:07,253 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 
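The entries above show the flush-type snapshot snaptb0-testConsecutiveExports being flushed, manifested, and closed out region by region before any export begins. As a rough illustration of how such a snapshot is requested through the client API (a minimal sketch, not the code TestExportSnapshot actually runs; the connection setup and class name are assumptions, only the table and snapshot names come from the log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeFlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml pointing at the cluster is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Table and snapshot names as they appear in the log:
      // { ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH }
      admin.snapshot("snaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}
```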
2024-11-23T08:25:07,253 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-23T08:25:07,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-11-23T08:25:07,254 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:07,254 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:07,261 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=123, resume processing ppid=122 2024-11-23T08:25:07,261 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:25:07,261 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0e68eb80531b116ca00181dd63104595 in 338 msec 2024-11-23T08:25:07,263 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:25:07,264 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:25:07,265 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-11-23T08:25:07,266 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T08:25:07,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742080_1256 (size=621) 2024-11-23T08:25:07,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742080_1256 (size=621) 2024-11-23T08:25:07,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742080_1256 (size=621) 2024-11-23T08:25:07,325 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:25:07,336 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:25:07,337 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T08:25:07,339 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:25:07,339 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-11-23T08:25:07,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 453 msec 2024-11-23T08:25:07,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-23T08:25:07,518 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T08:25:07,518 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518 2024-11-23T08:25:07,518 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518, srcFsUri=hdfs://localhost:34105, srcDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:25:07,546 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34105, inputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:25:07,546 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@22fc879, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T08:25:07,548 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T08:25:07,551 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T08:25:07,578 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:07,578 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:07,579 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:08,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-6803591232565465541.jar 2024-11-23T08:25:08,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:08,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:08,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-17137863303342344216.jar 2024-11-23T08:25:08,678 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:08,678 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:08,678 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): 
For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:08,678 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:08,679 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:08,679 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:08,679 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T08:25:08,679 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T08:25:08,680 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T08:25:08,680 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T08:25:08,680 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T08:25:08,680 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T08:25:08,681 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T08:25:08,681 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 
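At this point the Time-limited test thread is preparing the first export of snaptb0-testConsecutiveExports to a local file:// destination: ExportSnapshot verifies the source snapshot, copies the snapshot manifest into the target's .hbase-snapshot/.tmp directory, and then resolves the jars the MapReduce copy job will need. A minimal sketch of driving the same tool directly, assuming its documented -snapshot/-copy-to options; the destination here is a placeholder, not the test's local-export-<timestamp> directory:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // -copy-to below is a placeholder directory, not the test's target path.
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/hbase-snapshot-export"
    });
    System.exit(exitCode);
  }
}
```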
2024-11-23T08:25:08,681 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T08:25:08,681 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T08:25:08,681 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T08:25:08,682 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:25:08,682 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:25:08,682 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:25:08,682 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:25:08,682 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:25:08,683 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:25:08,683 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:25:08,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742081_1257 (size=131440) 2024-11-23T08:25:08,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742081_1257 (size=131440) 2024-11-23T08:25:08,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is 
added to blk_1073742081_1257 (size=131440) 2024-11-23T08:25:08,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742082_1258 (size=4188619) 2024-11-23T08:25:08,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742082_1258 (size=4188619) 2024-11-23T08:25:08,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742082_1258 (size=4188619) 2024-11-23T08:25:08,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742083_1259 (size=1323991) 2024-11-23T08:25:08,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742083_1259 (size=1323991) 2024-11-23T08:25:08,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742083_1259 (size=1323991) 2024-11-23T08:25:08,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742084_1260 (size=903733) 2024-11-23T08:25:08,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742084_1260 (size=903733) 2024-11-23T08:25:08,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742084_1260 (size=903733) 2024-11-23T08:25:08,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742085_1261 (size=8360083) 2024-11-23T08:25:08,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742085_1261 (size=8360083) 2024-11-23T08:25:08,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742085_1261 (size=8360083) 2024-11-23T08:25:08,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742086_1262 (size=1877034) 2024-11-23T08:25:08,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742086_1262 (size=1877034) 2024-11-23T08:25:08,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742086_1262 (size=1877034) 2024-11-23T08:25:08,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742087_1263 (size=77835) 2024-11-23T08:25:08,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742087_1263 (size=77835) 2024-11-23T08:25:08,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742087_1263 (size=77835) 2024-11-23T08:25:08,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742088_1264 (size=440957) 2024-11-23T08:25:08,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41035 is added to blk_1073742088_1264 (size=440957) 2024-11-23T08:25:08,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742088_1264 (size=440957) 2024-11-23T08:25:08,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742089_1265 (size=30949) 2024-11-23T08:25:08,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742089_1265 (size=30949) 2024-11-23T08:25:08,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742089_1265 (size=30949) 2024-11-23T08:25:08,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742090_1266 (size=1597347) 2024-11-23T08:25:08,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742090_1266 (size=1597347) 2024-11-23T08:25:08,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742090_1266 (size=1597347) 2024-11-23T08:25:08,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742091_1267 (size=4695811) 2024-11-23T08:25:08,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742091_1267 (size=4695811) 2024-11-23T08:25:08,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742091_1267 (size=4695811) 2024-11-23T08:25:08,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742092_1268 (size=6424742) 2024-11-23T08:25:08,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742092_1268 (size=6424742) 2024-11-23T08:25:08,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742092_1268 (size=6424742) 2024-11-23T08:25:08,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742093_1269 (size=232957) 2024-11-23T08:25:08,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742093_1269 (size=232957) 2024-11-23T08:25:08,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742093_1269 (size=232957) 2024-11-23T08:25:09,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742094_1270 (size=127628) 2024-11-23T08:25:09,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742094_1270 (size=127628) 2024-11-23T08:25:09,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742094_1270 (size=127628) 2024-11-23T08:25:09,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41035 is added to blk_1073742095_1271 (size=20406) 2024-11-23T08:25:09,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742095_1271 (size=20406) 2024-11-23T08:25:09,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742095_1271 (size=20406) 2024-11-23T08:25:09,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742096_1272 (size=5175431) 2024-11-23T08:25:09,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742096_1272 (size=5175431) 2024-11-23T08:25:09,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742096_1272 (size=5175431) 2024-11-23T08:25:09,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742097_1273 (size=217634) 2024-11-23T08:25:09,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742097_1273 (size=217634) 2024-11-23T08:25:09,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742097_1273 (size=217634) 2024-11-23T08:25:09,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742098_1274 (size=1832290) 2024-11-23T08:25:09,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742098_1274 (size=1832290) 2024-11-23T08:25:09,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742098_1274 (size=1832290) 2024-11-23T08:25:09,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742099_1275 (size=322274) 2024-11-23T08:25:09,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742099_1275 (size=322274) 2024-11-23T08:25:09,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742099_1275 (size=322274) 2024-11-23T08:25:09,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742100_1276 (size=503880) 2024-11-23T08:25:09,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742100_1276 (size=503880) 2024-11-23T08:25:09,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742100_1276 (size=503880) 2024-11-23T08:25:09,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742101_1277 (size=29229) 2024-11-23T08:25:09,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742101_1277 (size=29229) 2024-11-23T08:25:09,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742101_1277 (size=29229) 2024-11-23T08:25:09,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742102_1278 (size=24096) 2024-11-23T08:25:09,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742102_1278 (size=24096) 2024-11-23T08:25:09,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742102_1278 (size=24096) 2024-11-23T08:25:09,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742103_1279 (size=111872) 2024-11-23T08:25:09,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742103_1279 (size=111872) 2024-11-23T08:25:09,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742103_1279 (size=111872) 2024-11-23T08:25:09,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742104_1280 (size=45609) 2024-11-23T08:25:09,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742104_1280 (size=45609) 2024-11-23T08:25:09,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742104_1280 (size=45609) 2024-11-23T08:25:09,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742105_1281 (size=136454) 2024-11-23T08:25:09,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742105_1281 (size=136454) 2024-11-23T08:25:09,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742105_1281 (size=136454) 2024-11-23T08:25:09,627 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
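Each "For class X, using jar Y" entry records TableMapReduceUtil resolving the jar that provides a class the export job depends on, and the bursts of addStoredBlock messages that follow appear to be those jars being written into HDFS so the MapReduce job can fetch them from the distributed cache. A minimal sketch of the same dependency-shipping call for a job of one's own (hypothetical job name; this is not the ExportSnapshot internals):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-like-job"); // hypothetical job name
    // Resolves the jar backing each required class (the "For class X, using jar Y"
    // entries above) and registers those jars with the job so they are shipped
    // to the cluster alongside it.
    TableMapReduceUtil.addDependencyJars(job);
    System.out.println("tmpjars = " + job.getConfiguration().get("tmpjars"));
  }
}
```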
2024-11-23T08:25:09,629 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-23T08:25:09,633 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=7.9 K 2024-11-23T08:25:09,633 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.4 K 2024-11-23T08:25:09,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742106_1282 (size=441) 2024-11-23T08:25:09,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742106_1282 (size=441) 2024-11-23T08:25:09,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742106_1282 (size=441) 2024-11-23T08:25:09,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742107_1283 (size=21) 2024-11-23T08:25:09,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742107_1283 (size=21) 2024-11-23T08:25:09,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742107_1283 (size=21) 2024-11-23T08:25:09,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742108_1284 (size=304047) 2024-11-23T08:25:09,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742108_1284 (size=304047) 2024-11-23T08:25:09,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742108_1284 (size=304047) 2024-11-23T08:25:10,371 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T08:25:10,371 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T08:25:10,374 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0004_000001 (auth:SIMPLE) from 127.0.0.1:60960 2024-11-23T08:25:10,383 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_0/usercache/jenkins/appcache/application_1732350113698_0004/container_1732350113698_0004_01_000001/launch_container.sh] 2024-11-23T08:25:10,383 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_0/usercache/jenkins/appcache/application_1732350113698_0004/container_1732350113698_0004_01_000001/container_tokens] 2024-11-23T08:25:10,383 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_0/usercache/jenkins/appcache/application_1732350113698_0004/container_1732350113698_0004_01_000001/sysfs] 2024-11-23T08:25:11,121 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:25:11,330 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0005_000001 (auth:SIMPLE) from 127.0.0.1:41530 2024-11-23T08:25:14,707 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T08:25:17,675 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0005_000001 (auth:SIMPLE) from 127.0.0.1:47278 2024-11-23T08:25:18,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742109_1285 (size=349745) 2024-11-23T08:25:18,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742109_1285 (size=349745) 2024-11-23T08:25:18,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742109_1285 (size=349745) 2024-11-23T08:25:20,397 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0005_000001 (auth:SIMPLE) from 127.0.0.1:59140 2024-11-23T08:25:20,402 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0005_000001 (auth:SIMPLE) from 127.0.0.1:51014 2024-11-23T08:25:26,309 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_0/usercache/jenkins/appcache/application_1732350113698_0005/container_1732350113698_0005_01_000002/launch_container.sh] 2024-11-23T08:25:26,309 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_0/usercache/jenkins/appcache/application_1732350113698_0005/container_1732350113698_0005_01_000002/container_tokens] 2024-11-23T08:25:26,310 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_0/usercache/jenkins/appcache/application_1732350113698_0005/container_1732350113698_0005_01_000002/sysfs] 2024-11-23T08:25:26,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742110_1286 (size=22231) 2024-11-23T08:25:26,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742110_1286 (size=22231) 2024-11-23T08:25:26,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742110_1286 (size=22231) 2024-11-23T08:25:26,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742111_1287 (size=463) 2024-11-23T08:25:26,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742111_1287 (size=463) 2024-11-23T08:25:26,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742111_1287 (size=463) 2024-11-23T08:25:27,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742112_1288 
(size=22231) 2024-11-23T08:25:27,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742112_1288 (size=22231) 2024-11-23T08:25:27,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742112_1288 (size=22231) 2024-11-23T08:25:27,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742113_1289 (size=349745) 2024-11-23T08:25:27,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742113_1289 (size=349745) 2024-11-23T08:25:27,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742113_1289 (size=349745) 2024-11-23T08:25:27,077 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0005_000001 (auth:SIMPLE) from 127.0.0.1:47822 2024-11-23T08:25:27,093 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732350113698_0005_01_000003 is : 143 2024-11-23T08:25:27,107 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0005/container_1732350113698_0005_01_000003/launch_container.sh] 2024-11-23T08:25:27,107 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0005/container_1732350113698_0005_01_000003/container_tokens] 2024-11-23T08:25:27,107 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0005/container_1732350113698_0005_01_000003/sysfs] 2024-11-23T08:25:28,294 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T08:25:28,295 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
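With the copy job done, ExportSnapshot finalizes the export and re-verifies the exported snapshot; the listing that follows shows the exported layout (.snapshotinfo and data.manifest under .hbase-snapshot/<snapshot name>) on both the source HDFS and the local target. A minimal sketch of inspecting such an exported directory with the generic Hadoop FileSystem API, using a placeholder export path (this is not the verification code TestExportSnapshot itself uses):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListExportedSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder export root; the test writes to a local-export-<timestamp>
    // directory under hbase-mapreduce/target/test-data.
    Path snapshotDir = new Path(
        "file:///tmp/hbase-snapshot-export/.hbase-snapshot/snaptb0-testConsecutiveExports");
    FileSystem fs = snapshotDir.getFileSystem(new Configuration());
    for (FileStatus status : fs.listStatus(snapshotDir)) {
      // A successful export leaves .snapshotinfo and data.manifest here,
      // matching the listing in the log.
      System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
    }
  }
}
```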
2024-11-23T08:25:28,297 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-11-23T08:25:28,297 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T08:25:28,297 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T08:25:28,297 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T08:25:28,299 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-23T08:25:28,299 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-23T08:25:28,299 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@22fc879 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T08:25:28,299 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-23T08:25:28,299 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-23T08:25:28,300 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518, srcFsUri=hdfs://localhost:34105, srcDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:25:28,332 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34105, inputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:25:28,332 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@22fc879, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T08:25:28,334 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T08:25:28,338 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T08:25:28,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:28,364 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:28,364 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:29,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-18063803940608899187.jar 2024-11-23T08:25:29,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:29,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:29,326 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-13971402876790374419.jar 2024-11-23T08:25:29,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:29,327 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:29,328 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:29,328 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:29,328 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:29,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:29,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T08:25:29,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T08:25:29,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T08:25:29,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T08:25:29,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T08:25:29,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T08:25:29,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T08:25:29,331 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T08:25:29,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T08:25:29,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T08:25:29,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T08:25:29,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:25:29,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:25:29,333 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:25:29,333 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:25:29,333 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:25:29,334 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:25:29,334 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:25:29,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742114_1290 (size=131440) 2024-11-23T08:25:29,396 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742114_1290 (size=131440) 2024-11-23T08:25:29,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742114_1290 (size=131440) 2024-11-23T08:25:29,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742115_1291 (size=4188619) 2024-11-23T08:25:29,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742115_1291 (size=4188619) 2024-11-23T08:25:29,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742115_1291 (size=4188619) 2024-11-23T08:25:29,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742116_1292 (size=1323991) 2024-11-23T08:25:29,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742116_1292 (size=1323991) 2024-11-23T08:25:29,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742116_1292 (size=1323991) 2024-11-23T08:25:29,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742117_1293 (size=903733) 2024-11-23T08:25:29,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742117_1293 (size=903733) 2024-11-23T08:25:29,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742117_1293 (size=903733) 2024-11-23T08:25:29,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742118_1294 (size=8360083) 2024-11-23T08:25:29,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742118_1294 (size=8360083) 2024-11-23T08:25:29,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742118_1294 (size=8360083) 2024-11-23T08:25:29,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742119_1295 (size=6424742) 2024-11-23T08:25:29,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742119_1295 (size=6424742) 2024-11-23T08:25:29,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742119_1295 (size=6424742) 2024-11-23T08:25:29,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742120_1296 (size=1877034) 2024-11-23T08:25:29,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742120_1296 (size=1877034) 2024-11-23T08:25:29,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742120_1296 (size=1877034) 2024-11-23T08:25:29,507 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742121_1297 (size=77835) 2024-11-23T08:25:29,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742121_1297 (size=77835) 2024-11-23T08:25:29,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742121_1297 (size=77835) 2024-11-23T08:25:29,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742122_1298 (size=30949) 2024-11-23T08:25:29,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742122_1298 (size=30949) 2024-11-23T08:25:29,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742122_1298 (size=30949) 2024-11-23T08:25:29,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742123_1299 (size=1597347) 2024-11-23T08:25:29,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742123_1299 (size=1597347) 2024-11-23T08:25:29,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742123_1299 (size=1597347) 2024-11-23T08:25:29,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742124_1300 (size=4695811) 2024-11-23T08:25:29,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742124_1300 (size=4695811) 2024-11-23T08:25:29,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742124_1300 (size=4695811) 2024-11-23T08:25:29,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742125_1301 (size=232957) 2024-11-23T08:25:29,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742125_1301 (size=232957) 2024-11-23T08:25:29,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742125_1301 (size=232957) 2024-11-23T08:25:29,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742126_1302 (size=127628) 2024-11-23T08:25:29,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742126_1302 (size=127628) 2024-11-23T08:25:29,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742126_1302 (size=127628) 2024-11-23T08:25:29,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742127_1303 (size=20406) 2024-11-23T08:25:29,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742127_1303 (size=20406) 2024-11-23T08:25:29,560 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742127_1303 (size=20406) 2024-11-23T08:25:29,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742128_1304 (size=440957) 2024-11-23T08:25:29,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742128_1304 (size=440957) 2024-11-23T08:25:29,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742128_1304 (size=440957) 2024-11-23T08:25:29,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742129_1305 (size=5175431) 2024-11-23T08:25:29,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742129_1305 (size=5175431) 2024-11-23T08:25:29,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742129_1305 (size=5175431) 2024-11-23T08:25:29,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742130_1306 (size=217634) 2024-11-23T08:25:29,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742130_1306 (size=217634) 2024-11-23T08:25:29,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742130_1306 (size=217634) 2024-11-23T08:25:29,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742131_1307 (size=1832290) 2024-11-23T08:25:29,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742131_1307 (size=1832290) 2024-11-23T08:25:29,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742131_1307 (size=1832290) 2024-11-23T08:25:29,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742132_1308 (size=322274) 2024-11-23T08:25:29,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742132_1308 (size=322274) 2024-11-23T08:25:29,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742132_1308 (size=322274) 2024-11-23T08:25:29,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742133_1309 (size=503880) 2024-11-23T08:25:29,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742133_1309 (size=503880) 2024-11-23T08:25:29,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742133_1309 (size=503880) 2024-11-23T08:25:29,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742134_1310 (size=29229) 
2024-11-23T08:25:29,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742134_1310 (size=29229) 2024-11-23T08:25:29,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742134_1310 (size=29229) 2024-11-23T08:25:29,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742135_1311 (size=24096) 2024-11-23T08:25:29,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742135_1311 (size=24096) 2024-11-23T08:25:29,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742135_1311 (size=24096) 2024-11-23T08:25:29,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742136_1312 (size=111872) 2024-11-23T08:25:29,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742136_1312 (size=111872) 2024-11-23T08:25:29,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742136_1312 (size=111872) 2024-11-23T08:25:29,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742137_1313 (size=45609) 2024-11-23T08:25:29,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742137_1313 (size=45609) 2024-11-23T08:25:29,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742137_1313 (size=45609) 2024-11-23T08:25:29,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742138_1314 (size=136454) 2024-11-23T08:25:29,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742138_1314 (size=136454) 2024-11-23T08:25:29,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742138_1314 (size=136454) 2024-11-23T08:25:29,653 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-23T08:25:29,655 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-23T08:25:29,657 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=7.9 K 2024-11-23T08:25:29,657 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.4 K 2024-11-23T08:25:29,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742139_1315 (size=441) 2024-11-23T08:25:29,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742139_1315 (size=441) 2024-11-23T08:25:29,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742139_1315 (size=441) 2024-11-23T08:25:29,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742140_1316 (size=21) 2024-11-23T08:25:29,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742140_1316 (size=21) 2024-11-23T08:25:29,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742140_1316 (size=21) 2024-11-23T08:25:29,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742141_1317 (size=304049) 2024-11-23T08:25:29,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742141_1317 (size=304049) 2024-11-23T08:25:29,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742141_1317 (size=304049) 2024-11-23T08:25:33,166 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T08:25:33,166 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T08:25:33,170 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0005_000001 (auth:SIMPLE) from 127.0.0.1:40780 2024-11-23T08:25:33,185 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_0/usercache/jenkins/appcache/application_1732350113698_0005/container_1732350113698_0005_01_000001/launch_container.sh] 2024-11-23T08:25:33,185 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_0/usercache/jenkins/appcache/application_1732350113698_0005/container_1732350113698_0005_01_000001/container_tokens] 2024-11-23T08:25:33,185 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_0/usercache/jenkins/appcache/application_1732350113698_0005/container_1732350113698_0005_01_000001/sysfs] 2024-11-23T08:25:34,105 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0006_000001 (auth:SIMPLE) from 127.0.0.1:50772 2024-11-23T08:25:41,228 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0006_000001 (auth:SIMPLE) from 127.0.0.1:46956 2024-11-23T08:25:42,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742142_1318 (size=349747) 2024-11-23T08:25:42,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742142_1318 (size=349747) 2024-11-23T08:25:42,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742142_1318 (size=349747) 2024-11-23T08:25:43,799 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0006_000001 (auth:SIMPLE) from 127.0.0.1:38644 2024-11-23T08:25:43,800 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0006_000001 (auth:SIMPLE) from 127.0.0.1:37126 2024-11-23T08:25:44,707 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T08:25:48,827 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0006/container_1732350113698_0006_01_000002/launch_container.sh] 2024-11-23T08:25:48,827 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0006/container_1732350113698_0006_01_000002/container_tokens] 2024-11-23T08:25:48,828 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0006/container_1732350113698_0006_01_000002/sysfs] 2024-11-23T08:25:49,212 WARN [regionserver/b33d9013c60a:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 1, running: 0 2024-11-23T08:25:49,214 WARN [regionserver/b33d9013c60a:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 2, running: 1 2024-11-23T08:25:49,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742143_1319 (size=21199) 2024-11-23T08:25:49,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742143_1319 (size=21199) 2024-11-23T08:25:49,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742143_1319 (size=21199) 2024-11-23T08:25:49,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742144_1320 (size=463) 2024-11-23T08:25:49,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742144_1320 (size=463) 2024-11-23T08:25:49,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742144_1320 (size=463) 2024-11-23T08:25:49,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742145_1321 (size=21199) 2024-11-23T08:25:49,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742145_1321 (size=21199) 2024-11-23T08:25:49,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742145_1321 (size=21199) 2024-11-23T08:25:49,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742146_1322 (size=349747) 2024-11-23T08:25:49,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742146_1322 (size=349747) 2024-11-23T08:25:49,405 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742146_1322 (size=349747) 2024-11-23T08:25:49,407 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0006/container_1732350113698_0006_01_000003/launch_container.sh] 2024-11-23T08:25:49,407 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0006/container_1732350113698_0006_01_000003/container_tokens] 2024-11-23T08:25:49,407 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0006/container_1732350113698_0006_01_000003/sysfs] 2024-11-23T08:25:49,423 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0006_000001 (auth:SIMPLE) from 127.0.0.1:37128 2024-11-23T08:25:49,431 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0006_000001 (auth:SIMPLE) from 127.0.0.1:38646 2024-11-23T08:25:50,060 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 0e68eb80531b116ca00181dd63104595 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:25:50,060 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 39d2d9135938bc1dceb526423ba88242 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:25:51,198 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T08:25:51,199 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-11-23T08:25:51,201 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-11-23T08:25:51,201 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T08:25:51,201 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T08:25:51,201 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T08:25:51,202 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-23T08:25:51,202 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-23T08:25:51,202 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@22fc879 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T08:25:51,202 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-23T08:25:51,202 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350307518/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-23T08:25:51,209 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0e68eb80531b116ca00181dd63104595, had cached 0 bytes from a total of 5566 2024-11-23T08:25:51,210 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 39d2d9135938bc1dceb526423ba88242, had cached 0 bytes from a total of 8052 2024-11-23T08:25:51,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-11-23T08:25:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-11-23T08:25:51,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-23T08:25:51,223 DEBUG [PEWorker-2 {}] 
hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350351223"}]},"ts":"1732350351223"} 2024-11-23T08:25:51,225 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-11-23T08:25:51,225 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-11-23T08:25:51,227 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-11-23T08:25:51,228 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0e68eb80531b116ca00181dd63104595, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=39d2d9135938bc1dceb526423ba88242, UNASSIGN}] 2024-11-23T08:25:51,230 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0e68eb80531b116ca00181dd63104595, UNASSIGN 2024-11-23T08:25:51,230 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=39d2d9135938bc1dceb526423ba88242, UNASSIGN 2024-11-23T08:25:51,232 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=39d2d9135938bc1dceb526423ba88242, regionState=CLOSING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:25:51,232 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=0e68eb80531b116ca00181dd63104595, regionState=CLOSING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:25:51,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=39d2d9135938bc1dceb526423ba88242, UNASSIGN because future has completed 2024-11-23T08:25:51,235 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:25:51,235 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 39d2d9135938bc1dceb526423ba88242, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:25:51,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0e68eb80531b116ca00181dd63104595, UNASSIGN because future has completed 2024-11-23T08:25:51,238 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: 
evictOnSplit: true: evictOnClose: false 2024-11-23T08:25:51,239 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0e68eb80531b116ca00181dd63104595, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:25:51,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-23T08:25:51,388 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:51,388 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:25:51,388 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 39d2d9135938bc1dceb526423ba88242, disabling compactions & flushes 2024-11-23T08:25:51,388 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 2024-11-23T08:25:51,388 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 2024-11-23T08:25:51,388 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. after waiting 0 ms 2024-11-23T08:25:51,388 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 2024-11-23T08:25:51,392 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:51,392 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:25:51,392 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing 0e68eb80531b116ca00181dd63104595, disabling compactions & flushes 2024-11-23T08:25:51,392 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:51,392 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:51,392 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 
after waiting 0 ms 2024-11-23T08:25:51,392 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 2024-11-23T08:25:51,392 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:25:51,393 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:25:51,393 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242. 2024-11-23T08:25:51,393 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 39d2d9135938bc1dceb526423ba88242: Waiting for close lock at 1732350351388Running coprocessor pre-close hooks at 1732350351388Disabling compacts and flushes for region at 1732350351388Disabling writes for close at 1732350351388Writing region close event to WAL at 1732350351389 (+1 ms)Running coprocessor post-close hooks at 1732350351393 (+4 ms)Closed at 1732350351393 2024-11-23T08:25:51,395 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:51,396 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=39d2d9135938bc1dceb526423ba88242, regionState=CLOSED 2024-11-23T08:25:51,396 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:25:51,396 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:25:51,396 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595. 
2024-11-23T08:25:51,396 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for 0e68eb80531b116ca00181dd63104595: Waiting for close lock at 1732350351392Running coprocessor pre-close hooks at 1732350351392Disabling compacts and flushes for region at 1732350351392Disabling writes for close at 1732350351392Writing region close event to WAL at 1732350351393 (+1 ms)Running coprocessor post-close hooks at 1732350351396 (+3 ms)Closed at 1732350351396 2024-11-23T08:25:51,399 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed 0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:51,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 39d2d9135938bc1dceb526423ba88242, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:25:51,399 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=0e68eb80531b116ca00181dd63104595, regionState=CLOSED 2024-11-23T08:25:51,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0e68eb80531b116ca00181dd63104595, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:25:51,407 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=128 2024-11-23T08:25:51,408 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure 39d2d9135938bc1dceb526423ba88242, server=b33d9013c60a,45199,1732350106787 in 164 msec 2024-11-23T08:25:51,408 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=39d2d9135938bc1dceb526423ba88242, UNASSIGN in 179 msec 2024-11-23T08:25:51,409 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=127 2024-11-23T08:25:51,409 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 0e68eb80531b116ca00181dd63104595, server=b33d9013c60a,39649,1732350107049 in 169 msec 2024-11-23T08:25:51,411 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=127, resume processing ppid=126 2024-11-23T08:25:51,411 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0e68eb80531b116ca00181dd63104595, UNASSIGN in 181 msec 2024-11-23T08:25:51,413 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-11-23T08:25:51,413 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 184 msec 2024-11-23T08:25:51,414 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350351414"}]},"ts":"1732350351414"} 2024-11-23T08:25:51,416 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-11-23T08:25:51,416 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-11-23T08:25:51,418 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 200 msec 2024-11-23T08:25:51,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-23T08:25:51,538 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T08:25:51,538 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-11-23T08:25:51,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T08:25:51,541 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T08:25:51,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-11-23T08:25:51,543 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T08:25:51,546 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-11-23T08:25:51,550 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:51,550 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:51,553 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/recovered.edits] 2024-11-23T08:25:51,556 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/cf, 
FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/recovered.edits] 2024-11-23T08:25:51,557 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/cf/291a96c9d05e4fd99a30f21389012821 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/cf/291a96c9d05e4fd99a30f21389012821 2024-11-23T08:25:51,560 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/cf/d312f5e3c3d044bfa96dadf9638ad893 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/cf/d312f5e3c3d044bfa96dadf9638ad893 2024-11-23T08:25:51,560 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595/recovered.edits/9.seqid 2024-11-23T08:25:51,561 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/0e68eb80531b116ca00181dd63104595 2024-11-23T08:25:51,563 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242/recovered.edits/9.seqid 2024-11-23T08:25:51,563 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testConsecutiveExports/39d2d9135938bc1dceb526423ba88242 2024-11-23T08:25:51,563 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-11-23T08:25:51,565 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T08:25:51,569 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-11-23T08:25:51,572 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 
2024-11-23T08:25:51,575 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T08:25:51,575 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 2024-11-23T08:25:51,575 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350351575"}]},"ts":"9223372036854775807"} 2024-11-23T08:25:51,575 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350351575"}]},"ts":"9223372036854775807"} 2024-11-23T08:25:51,577 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T08:25:51,578 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0e68eb80531b116ca00181dd63104595, NAME => 'testtb-testConsecutiveExports,,1732350305844.0e68eb80531b116ca00181dd63104595.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 39d2d9135938bc1dceb526423ba88242, NAME => 'testtb-testConsecutiveExports,1,1732350305844.39d2d9135938bc1dceb526423ba88242.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T08:25:51,578 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-11-23T08:25:51,578 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732350351578"}]},"ts":"9223372036854775807"} 2024-11-23T08:25:51,580 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-11-23T08:25:51,581 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T08:25:51,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 43 msec 2024-11-23T08:25:51,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T08:25:51,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T08:25:51,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T08:25:51,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, 
state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T08:25:51,583 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-23T08:25:51,583 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-23T08:25:51,584 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-23T08:25:51,584 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-23T08:25:51,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T08:25:51,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T08:25:51,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:51,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:51,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T08:25:51,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:51,592 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T08:25:51,592 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:51,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-23T08:25:51,593 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-11-23T08:25:51,593 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T08:25:51,602 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(838): 
Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-11-23T08:25:51,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-11-23T08:25:51,610 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-11-23T08:25:51,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-11-23T08:25:51,636 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=808 (was 808), OpenFileDescriptor=811 (was 811), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=750 (was 768), ProcessCount=19 (was 18) - ProcessCount LEAK? -, AvailableMemoryMB=2836 (was 3206) 2024-11-23T08:25:51,636 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-11-23T08:25:51,657 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=808, OpenFileDescriptor=811, MaxFileDescriptor=1048576, SystemLoadAverage=750, ProcessCount=16, AvailableMemoryMB=2913 2024-11-23T08:25:51,657 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-11-23T08:25:51,659 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:25:51,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:51,661 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:25:51,661 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:25:51,661 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-11-23T08:25:51,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-23T08:25:51,662 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 
2024-11-23T08:25:51,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742147_1323 (size=422) 2024-11-23T08:25:51,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742147_1323 (size=422) 2024-11-23T08:25:51,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742147_1323 (size=422) 2024-11-23T08:25:51,674 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 86145634754ede6b03c905ef165a7779, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:25:51,675 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 55ba5ccb42b30ceea0f18f37a65619c6, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:25:51,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742149_1325 (size=83) 2024-11-23T08:25:51,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742149_1325 (size=83) 2024-11-23T08:25:51,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742149_1325 (size=83) 2024-11-23T08:25:51,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742148_1324 (size=83) 2024-11-23T08:25:51,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742148_1324 (size=83) 2024-11-23T08:25:51,686 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:25:51,686 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 55ba5ccb42b30ceea0f18f37a65619c6, disabling compactions & flushes 2024-11-23T08:25:51,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742148_1324 (size=83) 2024-11-23T08:25:51,686 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 2024-11-23T08:25:51,686 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 2024-11-23T08:25:51,686 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. after waiting 0 ms 2024-11-23T08:25:51,686 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 2024-11-23T08:25:51,686 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 2024-11-23T08:25:51,686 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 55ba5ccb42b30ceea0f18f37a65619c6: Waiting for close lock at 1732350351686Disabling compacts and flushes for region at 1732350351686Disabling writes for close at 1732350351686Writing region close event to WAL at 1732350351686Closed at 1732350351686 2024-11-23T08:25:51,686 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:25:51,686 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 86145634754ede6b03c905ef165a7779, disabling compactions & flushes 2024-11-23T08:25:51,686 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 2024-11-23T08:25:51,686 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 2024-11-23T08:25:51,686 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 
after waiting 0 ms 2024-11-23T08:25:51,686 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 2024-11-23T08:25:51,686 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 2024-11-23T08:25:51,687 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 86145634754ede6b03c905ef165a7779: Waiting for close lock at 1732350351686Disabling compacts and flushes for region at 1732350351686Disabling writes for close at 1732350351686Writing region close event to WAL at 1732350351686Closed at 1732350351686 2024-11-23T08:25:51,688 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:25:51,688 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732350351688"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350351688"}]},"ts":"1732350351688"} 2024-11-23T08:25:51,688 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732350351688"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350351688"}]},"ts":"1732350351688"} 2024-11-23T08:25:51,691 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
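[editor's note] Once the two region rows are written to hbase:meta ("Added 2 regions to meta" above), a client can observe the same boundaries through RegionLocator. A small sketch, assuming an already-open Connection to this mini cluster (helper name is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PrintRegions {
      // 'conn' is assumed to be an open Connection to the cluster in this log.
      static void printRegions(Connection conn) throws IOException {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Expect two regions with start keys '' and '1', per the meta rows written above.
            System.out.println(Bytes.toStringBinary(loc.getRegion().getStartKey())
                + " -> " + loc.getServerName());
          }
        }
      }
    }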
2024-11-23T08:25:51,692 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:25:51,692 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350351692"}]},"ts":"1732350351692"} 2024-11-23T08:25:51,694 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-11-23T08:25:51,694 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:25:51,695 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:25:51,695 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:25:51,695 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:25:51,696 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:25:51,696 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:25:51,696 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:25:51,696 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:25:51,696 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:25:51,696 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:25:51,696 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:25:51,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=86145634754ede6b03c905ef165a7779, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=55ba5ccb42b30ceea0f18f37a65619c6, ASSIGN}] 2024-11-23T08:25:51,697 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=86145634754ede6b03c905ef165a7779, ASSIGN 2024-11-23T08:25:51,697 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=55ba5ccb42b30ceea0f18f37a65619c6, ASSIGN 2024-11-23T08:25:51,698 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=55ba5ccb42b30ceea0f18f37a65619c6, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 
2024-11-23T08:25:51,698 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=86145634754ede6b03c905ef165a7779, ASSIGN; state=OFFLINE, location=b33d9013c60a,43417,1732350106962; forceNewPlan=false, retain=false 2024-11-23T08:25:51,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-23T08:25:51,848 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-23T08:25:51,849 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=55ba5ccb42b30ceea0f18f37a65619c6, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:25:51,849 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=86145634754ede6b03c905ef165a7779, regionState=OPENING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:25:51,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=86145634754ede6b03c905ef165a7779, ASSIGN because future has completed 2024-11-23T08:25:51,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 86145634754ede6b03c905ef165a7779, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:25:51,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=55ba5ccb42b30ceea0f18f37a65619c6, ASSIGN because future has completed 2024-11-23T08:25:51,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:25:51,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-23T08:25:52,006 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 2024-11-23T08:25:52,006 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 86145634754ede6b03c905ef165a7779, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T08:25:52,007 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 
service=AccessControlService 2024-11-23T08:25:52,007 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:25:52,007 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,007 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:25:52,007 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,007 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,009 INFO [StoreOpener-86145634754ede6b03c905ef165a7779-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,009 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 2024-11-23T08:25:52,009 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => 55ba5ccb42b30ceea0f18f37a65619c6, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T08:25:52,009 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. service=AccessControlService 2024-11-23T08:25:52,010 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
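[editor's note] The AccessControlService coprocessor registering on each opened region here is normally enabled via configuration. A hedged sketch of the relevant settings, expressed programmatically for brevity (the test's actual secure-cluster setup likely configures more, e.g. authentication, than shown):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SecureClusterConf {
      static Configuration withAccessController() {
        Configuration conf = HBaseConfiguration.create();
        // Turn on authorization and load the AccessController coprocessor on master,
        // regionservers and regions, which is what produces the log lines above.
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        return conf;
      }
    }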
2024-11-23T08:25:52,010 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,010 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:25:52,010 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,010 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,010 INFO [StoreOpener-86145634754ede6b03c905ef165a7779-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 86145634754ede6b03c905ef165a7779 columnFamilyName cf 2024-11-23T08:25:52,010 DEBUG [StoreOpener-86145634754ede6b03c905ef165a7779-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:25:52,010 INFO [StoreOpener-86145634754ede6b03c905ef165a7779-1 {}] regionserver.HStore(327): Store=86145634754ede6b03c905ef165a7779/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:25:52,011 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,011 INFO [StoreOpener-55ba5ccb42b30ceea0f18f37a65619c6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,011 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,012 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,012 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,012 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,012 INFO [StoreOpener-55ba5ccb42b30ceea0f18f37a65619c6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 55ba5ccb42b30ceea0f18f37a65619c6 columnFamilyName cf 2024-11-23T08:25:52,012 DEBUG [StoreOpener-55ba5ccb42b30ceea0f18f37a65619c6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:25:52,012 INFO [StoreOpener-55ba5ccb42b30ceea0f18f37a65619c6-1 {}] regionserver.HStore(327): Store=55ba5ccb42b30ceea0f18f37a65619c6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:25:52,013 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,013 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,013 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,013 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,014 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,014 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,015 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] 
wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:25:52,015 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,015 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 86145634754ede6b03c905ef165a7779; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66259824, jitterRate=-0.012651681900024414}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:25:52,016 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,016 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 86145634754ede6b03c905ef165a7779: Running coprocessor pre-open hook at 1732350352007Writing region info on filesystem at 1732350352008 (+1 ms)Initializing all the Stores at 1732350352008Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350352008Cleaning up temporary data from old regions at 1732350352012 (+4 ms)Running coprocessor post-open hooks at 1732350352016 (+4 ms)Region opened successfully at 1732350352016 2024-11-23T08:25:52,017 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779., pid=135, masterSystemTime=1732350352003 2024-11-23T08:25:52,017 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:25:52,017 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened 55ba5ccb42b30ceea0f18f37a65619c6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67863006, jitterRate=0.01123759150505066}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:25:52,018 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,018 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for 55ba5ccb42b30ceea0f18f37a65619c6: Running 
coprocessor pre-open hook at 1732350352010Writing region info on filesystem at 1732350352010Initializing all the Stores at 1732350352011 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350352011Cleaning up temporary data from old regions at 1732350352014 (+3 ms)Running coprocessor post-open hooks at 1732350352018 (+4 ms)Region opened successfully at 1732350352018 2024-11-23T08:25:52,018 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6., pid=136, masterSystemTime=1732350352006 2024-11-23T08:25:52,018 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 2024-11-23T08:25:52,019 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 2024-11-23T08:25:52,019 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=86145634754ede6b03c905ef165a7779, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:25:52,020 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 2024-11-23T08:25:52,020 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 
2024-11-23T08:25:52,020 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=55ba5ccb42b30ceea0f18f37a65619c6, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:25:52,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 86145634754ede6b03c905ef165a7779, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:25:52,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:25:52,023 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=133 2024-11-23T08:25:52,023 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 86145634754ede6b03c905ef165a7779, server=b33d9013c60a,43417,1732350106962 in 171 msec 2024-11-23T08:25:52,024 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=134 2024-11-23T08:25:52,024 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6, server=b33d9013c60a,39649,1732350107049 in 171 msec 2024-11-23T08:25:52,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=86145634754ede6b03c905ef165a7779, ASSIGN in 327 msec 2024-11-23T08:25:52,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-11-23T08:25:52,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=55ba5ccb42b30ceea0f18f37a65619c6, ASSIGN in 328 msec 2024-11-23T08:25:52,039 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:25:52,039 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350352039"}]},"ts":"1732350352039"} 2024-11-23T08:25:52,041 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-11-23T08:25:52,042 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:25:52,042 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-11-23T08:25:52,046 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-23T08:25:52,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:52,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:52,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:52,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:52,092 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:52,092 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:52,092 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:52,092 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:52,094 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 433 msec 2024-11-23T08:25:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-23T08:25:52,288 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T08:25:52,288 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. 
Timeout = 60000ms 2024-11-23T08:25:52,288 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:25:52,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-11-23T08:25:52,291 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:25:52,292 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-11-23T08:25:52,292 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T08:25:52,294 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-23T08:25:52,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350352294 (current time:1732350352294). 2024-11-23T08:25:52,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:25:52,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-23T08:25:52,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:25:52,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d4224b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:52,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:25:52,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:25:52,296 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:25:52,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:25:52,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:25:52,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74f7eb89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:52,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:25:52,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:25:52,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:52,297 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50522, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:25:52,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bca2e59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:52,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:25:52,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:25:52,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:52,300 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37534, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:25:52,301 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 
2024-11-23T08:25:52,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:25:52,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:52,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:52,301 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:25:52,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41193106, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:52,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:25:52,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:25:52,303 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:25:52,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:25:52,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:25:52,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@734fefe9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:52,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:25:52,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:25:52,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:52,304 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50548, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:25:52,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75fcffca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:52,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:25:52,306 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:25:52,306 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:52,307 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37536, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:25:52,309 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:25:52,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:52,311 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37402, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:25:52,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 
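[editor's note] The hbase:acl lookups in this stretch (the "jenkins: RWXCA" grant written after table creation, and read back again just below during snapshot validation) can also be inspected from a client via AccessControlClient. A minimal sketch, assuming the connection user is authorized to read ACLs:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowAcls {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Read back the per-table grants stored in hbase:acl (e.g. the RWXCA entry above).
          List<UserPermission> perms = AccessControlClient.getUserPermissions(
              conn, "testtb-testExportFileSystemStateWithMergeRegion");
          perms.forEach(System.out::println);
        }
      }
    }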
2024-11-23T08:25:52,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:25:52,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:52,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:52,312 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:25:52,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-23T08:25:52,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
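[editor's note] The FLUSH snapshot that SnapshotManager now proceeds with (pid=137 below) maps to a single client call. A hedged sketch using the public Admin API; the test harness may drive it differently:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure completes.
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
              SnapshotType.FLUSH);
        }
      }
    }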
2024-11-23T08:25:52,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-23T08:25:52,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-11-23T08:25:52,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-23T08:25:52,315 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:25:52,316 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:25:52,318 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:25:52,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742150_1326 (size=215) 2024-11-23T08:25:52,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742150_1326 (size=215) 2024-11-23T08:25:52,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742150_1326 (size=215) 2024-11-23T08:25:52,325 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:25:52,325 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 86145634754ede6b03c905ef165a7779}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6}] 2024-11-23T08:25:52,326 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,326 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-23T08:25:52,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-11-23T08:25:52,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-11-23T08:25:52,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 2024-11-23T08:25:52,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 2024-11-23T08:25:52,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 86145634754ede6b03c905ef165a7779: 2024-11-23T08:25:52,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for 55ba5ccb42b30ceea0f18f37a65619c6: 2024-11-23T08:25:52,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-23T08:25:52,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-23T08:25:52,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:52,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:52,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:25:52,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:25:52,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:25:52,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:25:52,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742151_1327 (size=86) 2024-11-23T08:25:52,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742151_1327 (size=86) 2024-11-23T08:25:52,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742151_1327 (size=86) 2024-11-23T08:25:52,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742152_1328 (size=86) 2024-11-23T08:25:52,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 2024-11-23T08:25:52,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-11-23T08:25:52,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742152_1328 (size=86) 2024-11-23T08:25:52,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742152_1328 (size=86) 2024-11-23T08:25:52,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-11-23T08:25:52,486 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 
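While the region-level subprocedures above run on the region servers, the requesting client simply polls the master for completion (the "Checking to see if procedure is done pid=137" entries). A hedged sketch of the non-blocking client pattern follows, using the Admin API's asynchronous snapshot submission and completion check; the poll interval is illustrative only.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class PollSnapshotCompletion {
  // Submits the snapshot without blocking, then polls until the master reports it done.
  static void snapshotAndWait(Admin admin) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "emptySnaptb0-testExportFileSystemStateWithMergeRegion",
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
        SnapshotType.FLUSH);
    admin.snapshotAsync(desc);                // returns once the procedure is submitted
    while (!admin.isSnapshotFinished(desc)) { // mirrors the "is procedure done" polling above
      Thread.sleep(100);                      // illustrative poll interval
    }
  }
}
```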
2024-11-23T08:25:52,486 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-23T08:25:52,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-11-23T08:25:52,486 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,487 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,488 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 86145634754ede6b03c905ef165a7779 in 162 msec 2024-11-23T08:25:52,489 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=139, resume processing ppid=137 2024-11-23T08:25:52,489 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:25:52,489 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6 in 162 msec 2024-11-23T08:25:52,489 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:25:52,490 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:25:52,490 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:52,491 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:52,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742153_1329 (size=597) 2024-11-23T08:25:52,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to 
blk_1073742153_1329 (size=597) 2024-11-23T08:25:52,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742153_1329 (size=597) 2024-11-23T08:25:52,502 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:25:52,505 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:25:52,506 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:52,507 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:25:52,507 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-11-23T08:25:52,509 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 194 msec 2024-11-23T08:25:52,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-23T08:25:52,627 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T08:25:52,631 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='0a2f60692c874603b0d58b32e72d44849', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:25:52,632 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='187ec0b3a34369e53af4b1b7aa1bdac39', locateType=CURRENT is 
[region=testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:25:52,632 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='2425a163b1ba45c131c1424294a37958a', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:25:52,633 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='3c9f0c3ba136a646c34aac03f330f371b', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:25:52,634 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='4b8042d7bec49ceb6d5c926b59fd900d4', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:25:52,640 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43417 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:25:52,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39649 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:25:52,642 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T08:25:52,645 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:52,645 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 
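The locator and write entries above show the test resolving which region serves each row and then writing with the WAL disabled (hence the "Data may be lost in the event of a crash" warning from HRegion). A minimal sketch of the same client-side pattern; the row key and value are placeholders, not data from this run.

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalWrite {
  static void writeWithoutWal(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    byte[] row = Bytes.toBytes("0a2f-example-row"); // placeholder row key

    // Resolve the hosting region, as in the AsyncNonMetaRegionLocator entries above.
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      HRegionLocation loc = locator.getRegionLocation(row);
      System.out.println("row is served by " + loc.getRegion().getRegionNameAsString()
          + " on " + loc.getServerName());
    }

    // Write with the WAL skipped; fast, but the edit is lost if the region server
    // crashes before the memstore is flushed (the warning logged above).
    try (Table table = conn.getTable(tn)) {
      Put p = new Put(row);
      p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      p.setDurability(Durability.SKIP_WAL);
      table.put(p);
    }
  }
}
```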
2024-11-23T08:25:52,645 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:25:52,648 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T08:25:52,652 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T08:25:52,658 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T08:25:52,660 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-23T08:25:52,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350352660 (current time:1732350352660). 2024-11-23T08:25:52,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:25:52,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-23T08:25:52,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:25:52,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44715237, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:52,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:25:52,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:25:52,662 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:25:52,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:25:52,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:25:52,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79b4e7ad, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:52,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:25:52,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:25:52,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:52,663 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50552, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:25:52,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40a8cffe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:52,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:25:52,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:25:52,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:52,665 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37548, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:25:52,666 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 
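The entries above are the master's own short-lived client connection doing the usual bootstrap for the second snapshot's validation: fetch the cluster id from the connection registry, then the hbase:meta location, then open a ClientService connection. The same bootstrap is visible from any client; a rough sketch under assumed names, using the public Admin and RegionLocator APIs.

```java
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ConnectionBootstrapInfo {
  static void printBootstrapInfo(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      // Cluster id, as returned by the ConnectionRegistryService handshake above.
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
    // hbase:meta location, as in "The fetched meta region location is [region=hbase:meta,,1...]".
    try (RegionLocator metaLocator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation metaLoc = metaLocator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("meta is on " + metaLoc.getServerName());
    }
  }
}
```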
2024-11-23T08:25:52,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:25:52,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:52,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:52,667 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:25:52,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d352f9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:52,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:25:52,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:25:52,668 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:25:52,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:25:52,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:25:52,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7074542c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:52,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:25:52,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:25:52,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:52,669 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50576, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:25:52,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d0bb642, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:52,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:25:52,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:25:52,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:52,672 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37556, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:25:52,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:25:52,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:52,674 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37412, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:25:52,675 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 
2024-11-23T08:25:52,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:25:52,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:52,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:52,676 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:25:52,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-23T08:25:52,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
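The "Read acl: entry[testtb-...], kv [jenkins: RWXCA]" line above is the master reading the table's permissions from hbase:acl so they can be embedded in the snapshot description (writeAclToSnapshotDescription in the call stack). A client can inspect the same permissions through AccessControlClient; a hedged sketch, assuming the access-control coprocessors are enabled as in this test cluster.

```java
import java.util.List;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTableAcls {
  // RWXCA in the log maps to Read, Write, EXEC, Create, Admin for the jenkins user.
  static void printAcls(Connection conn) throws Throwable {
    List<UserPermission> perms = AccessControlClient.getUserPermissions(
        conn, "testtb-testExportFileSystemStateWithMergeRegion");
    for (UserPermission p : perms) {
      System.out.println(p);
    }
  }
}
```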
2024-11-23T08:25:52,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-23T08:25:52,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-11-23T08:25:52,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-23T08:25:52,679 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:25:52,680 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:25:52,682 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:25:52,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742154_1330 (size=210) 2024-11-23T08:25:52,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742154_1330 (size=210) 2024-11-23T08:25:52,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742154_1330 (size=210) 2024-11-23T08:25:52,690 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:25:52,690 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 86145634754ede6b03c905ef165a7779}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6}] 2024-11-23T08:25:52,692 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,692 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-23T08:25:52,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-11-23T08:25:52,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-11-23T08:25:52,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 2024-11-23T08:25:52,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 2024-11-23T08:25:52,844 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 86145634754ede6b03c905ef165a7779 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-11-23T08:25:52,844 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing 55ba5ccb42b30ceea0f18f37a65619c6 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-11-23T08:25:52,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/.tmp/cf/a21eff376c444bd6a51475ff98cacd2a is 71, key is 01384ae67c5a36dda3df134875646e28/cf:q/1732350352639/Put/seqid=0 2024-11-23T08:25:52,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/.tmp/cf/aaaddacfcc694ef0aa0e0b044863d5cb is 71, key is 1620106cfa3419c7a187cd83f5db6a22/cf:q/1732350352641/Put/seqid=0 2024-11-23T08:25:52,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742156_1332 (size=8120) 2024-11-23T08:25:52,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742156_1332 (size=8120) 2024-11-23T08:25:52,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742155_1331 (size=5492) 2024-11-23T08:25:52,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742155_1331 (size=5492) 
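Unlike the earlier empty snapshot, this one finds data in the memstores, so each SnapshotRegionCallable first flushes its region (the "Flushing ... column families" and DefaultStoreFlusher entries above). The same flush can be requested explicitly through the Admin API; a small sketch with an assumed Admin handle.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class FlushTable {
  // Forces memstore contents of every region of the table out to HFiles,
  // the same per-region operation the FLUSH snapshot performs above.
  static void flush(Admin admin) throws Exception {
    admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
  }
}
```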
2024-11-23T08:25:52,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742155_1331 (size=5492) 2024-11-23T08:25:52,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742156_1332 (size=8120) 2024-11-23T08:25:52,864 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/.tmp/cf/aaaddacfcc694ef0aa0e0b044863d5cb 2024-11-23T08:25:52,866 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/.tmp/cf/a21eff376c444bd6a51475ff98cacd2a 2024-11-23T08:25:52,870 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/.tmp/cf/aaaddacfcc694ef0aa0e0b044863d5cb as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/cf/aaaddacfcc694ef0aa0e0b044863d5cb 2024-11-23T08:25:52,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/.tmp/cf/a21eff376c444bd6a51475ff98cacd2a as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/cf/a21eff376c444bd6a51475ff98cacd2a 2024-11-23T08:25:52,875 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/cf/aaaddacfcc694ef0aa0e0b044863d5cb, entries=44, sequenceid=6, filesize=7.9 K 2024-11-23T08:25:52,876 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 55ba5ccb42b30ceea0f18f37a65619c6 in 32ms, sequenceid=6, compaction requested=false 2024-11-23T08:25:52,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-23T08:25:52,877 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for 55ba5ccb42b30ceea0f18f37a65619c6: 2024-11-23T08:25:52,877 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-23T08:25:52,877 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:52,877 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:25:52,877 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/cf/aaaddacfcc694ef0aa0e0b044863d5cb] hfiles 2024-11-23T08:25:52,877 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/cf/aaaddacfcc694ef0aa0e0b044863d5cb for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:52,885 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/cf/a21eff376c444bd6a51475ff98cacd2a, entries=6, sequenceid=6, filesize=5.4 K 2024-11-23T08:25:52,886 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for 86145634754ede6b03c905ef165a7779 in 42ms, sequenceid=6, compaction requested=false 2024-11-23T08:25:52,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 86145634754ede6b03c905ef165a7779: 2024-11-23T08:25:52,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-23T08:25:52,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:52,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:25:52,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/cf/a21eff376c444bd6a51475ff98cacd2a] hfiles 2024-11-23T08:25:52,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/cf/a21eff376c444bd6a51475ff98cacd2a for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:52,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742157_1333 (size=125) 2024-11-23T08:25:52,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742157_1333 (size=125) 2024-11-23T08:25:52,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742158_1334 (size=125) 2024-11-23T08:25:52,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742158_1334 (size=125) 2024-11-23T08:25:52,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742158_1334 (size=125) 2024-11-23T08:25:52,896 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 
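Once the manifest above references the flushed HFiles and the snapshot completes, the test goes on to export it to another filesystem, which is the job of the ExportSnapshot tool. A hedged sketch of driving that tool programmatically; the destination URI is a placeholder, and it assumes ExportSnapshot is on the classpath and usable as a standard Hadoop Tool (as in current HBase releases).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot metadata and the referenced HFiles to the target filesystem.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion",
        "-copy-to", "hdfs://backup-cluster:8020/hbase"   // placeholder destination
    });
    System.exit(rc);
  }
}
```

The same export is more commonly launched from the command line via `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>`.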
2024-11-23T08:25:52,896 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-23T08:25:52,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-11-23T08:25:52,896 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,896 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 86145634754ede6b03c905ef165a7779 2024-11-23T08:25:52,898 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 86145634754ede6b03c905ef165a7779 in 207 msec 2024-11-23T08:25:52,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742157_1333 (size=125) 2024-11-23T08:25:52,903 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 2024-11-23T08:25:52,903 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-11-23T08:25:52,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-11-23T08:25:52,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,904 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:25:52,907 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=142, resume processing ppid=140 2024-11-23T08:25:52,908 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6 in 215 msec 2024-11-23T08:25:52,908 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:25:52,909 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:25:52,910 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, 
state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:25:52,910 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:52,911 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:52,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742159_1335 (size=675) 2024-11-23T08:25:52,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742159_1335 (size=675) 2024-11-23T08:25:52,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742159_1335 (size=675) 2024-11-23T08:25:52,935 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:25:52,940 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:25:52,940 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:52,942 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:25:52,942 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-11-23T08:25:52,943 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 265 msec 2024-11-23T08:25:52,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-23T08:25:52,998 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T08:25:52,999 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T08:25:53,000 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T08:25:53,000 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T08:25:53,001 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49334, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T08:25:53,001 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37568, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T08:25:53,001 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37426, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T08:25:53,003 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:25:53,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:25:53,005 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:25:53,005 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:25:53,005 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-11-23T08:25:53,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-23T08:25:53,006 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:25:53,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37117 is added to blk_1073742160_1336 (size=399) 2024-11-23T08:25:53,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742160_1336 (size=399) 2024-11-23T08:25:53,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742160_1336 (size=399) 2024-11-23T08:25:53,019 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4516986afbb8ffd21b2bbfd62f2bd573, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:25:53,023 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 2024ec23f364bb1556c1fc453b0c56ea, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:25:53,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742161_1337 (size=85) 2024-11-23T08:25:53,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742161_1337 (size=85) 2024-11-23T08:25:53,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742161_1337 (size=85) 2024-11-23T08:25:53,044 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:25:53,044 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 2024ec23f364bb1556c1fc453b0c56ea, disabling compactions & flushes 2024-11-23T08:25:53,044 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region 
testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. 2024-11-23T08:25:53,044 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. 2024-11-23T08:25:53,044 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. after waiting 0 ms 2024-11-23T08:25:53,044 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. 2024-11-23T08:25:53,044 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. 2024-11-23T08:25:53,044 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 2024ec23f364bb1556c1fc453b0c56ea: Waiting for close lock at 1732350353044Disabling compacts and flushes for region at 1732350353044Disabling writes for close at 1732350353044Writing region close event to WAL at 1732350353044Closed at 1732350353044 2024-11-23T08:25:53,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742162_1338 (size=85) 2024-11-23T08:25:53,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742162_1338 (size=85) 2024-11-23T08:25:53,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742162_1338 (size=85) 2024-11-23T08:25:53,054 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:25:53,054 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 4516986afbb8ffd21b2bbfd62f2bd573, disabling compactions & flushes 2024-11-23T08:25:53,054 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. 2024-11-23T08:25:53,054 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. 2024-11-23T08:25:53,054 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. 
after waiting 0 ms 2024-11-23T08:25:53,054 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. 2024-11-23T08:25:53,054 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. 2024-11-23T08:25:53,054 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4516986afbb8ffd21b2bbfd62f2bd573: Waiting for close lock at 1732350353054Disabling compacts and flushes for region at 1732350353054Disabling writes for close at 1732350353054Writing region close event to WAL at 1732350353054Closed at 1732350353054 2024-11-23T08:25:53,056 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:25:53,056 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732350353056"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350353056"}]},"ts":"1732350353056"} 2024-11-23T08:25:53,056 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732350353056"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350353056"}]},"ts":"1732350353056"} 2024-11-23T08:25:53,059 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-23T08:25:53,061 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:25:53,061 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350353061"}]},"ts":"1732350353061"} 2024-11-23T08:25:53,064 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-11-23T08:25:53,064 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:25:53,066 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:25:53,066 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:25:53,066 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:25:53,066 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:25:53,066 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:25:53,066 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:25:53,066 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:25:53,066 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:25:53,066 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:25:53,066 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:25:53,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4516986afbb8ffd21b2bbfd62f2bd573, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2024ec23f364bb1556c1fc453b0c56ea, ASSIGN}] 2024-11-23T08:25:53,069 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2024ec23f364bb1556c1fc453b0c56ea, ASSIGN 2024-11-23T08:25:53,069 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4516986afbb8ffd21b2bbfd62f2bd573, ASSIGN 2024-11-23T08:25:53,070 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4516986afbb8ffd21b2bbfd62f2bd573, ASSIGN; state=OFFLINE, location=b33d9013c60a,43417,1732350106962; forceNewPlan=false, 
retain=false 2024-11-23T08:25:53,071 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2024ec23f364bb1556c1fc453b0c56ea, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 2024-11-23T08:25:53,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-23T08:25:53,221 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-23T08:25:53,221 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=2024ec23f364bb1556c1fc453b0c56ea, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:25:53,221 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=4516986afbb8ffd21b2bbfd62f2bd573, regionState=OPENING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:25:53,223 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2024ec23f364bb1556c1fc453b0c56ea, ASSIGN because future has completed 2024-11-23T08:25:53,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2024ec23f364bb1556c1fc453b0c56ea, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:25:53,224 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4516986afbb8ffd21b2bbfd62f2bd573, ASSIGN because future has completed 2024-11-23T08:25:53,224 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4516986afbb8ffd21b2bbfd62f2bd573, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:25:53,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-23T08:25:53,378 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. 2024-11-23T08:25:53,378 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. 
2024-11-23T08:25:53,378 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => 4516986afbb8ffd21b2bbfd62f2bd573, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573.', STARTKEY => '', ENDKEY => '2'} 2024-11-23T08:25:53,378 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 2024ec23f364bb1556c1fc453b0c56ea, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea.', STARTKEY => '2', ENDKEY => ''} 2024-11-23T08:25:53,378 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. service=AccessControlService 2024-11-23T08:25:53,378 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. service=AccessControlService 2024-11-23T08:25:53,378 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:25:53,378 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:25:53,378 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:25:53,378 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:25:53,378 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:25:53,378 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:25:53,378 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:25:53,378 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for 4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:25:53,379 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:25:53,379 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for 4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:25:53,380 INFO [StoreOpener-2024ec23f364bb1556c1fc453b0c56ea-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:25:53,380 INFO [StoreOpener-4516986afbb8ffd21b2bbfd62f2bd573-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:25:53,381 INFO [StoreOpener-4516986afbb8ffd21b2bbfd62f2bd573-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4516986afbb8ffd21b2bbfd62f2bd573 columnFamilyName cf 2024-11-23T08:25:53,381 INFO [StoreOpener-2024ec23f364bb1556c1fc453b0c56ea-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2024ec23f364bb1556c1fc453b0c56ea columnFamilyName cf 2024-11-23T08:25:53,381 DEBUG [StoreOpener-4516986afbb8ffd21b2bbfd62f2bd573-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:25:53,381 DEBUG [StoreOpener-2024ec23f364bb1556c1fc453b0c56ea-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:25:53,381 INFO [StoreOpener-4516986afbb8ffd21b2bbfd62f2bd573-1 {}] regionserver.HStore(327): Store=4516986afbb8ffd21b2bbfd62f2bd573/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:25:53,381 INFO [StoreOpener-2024ec23f364bb1556c1fc453b0c56ea-1 {}] regionserver.HStore(327): Store=2024ec23f364bb1556c1fc453b0c56ea/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:25:53,381 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:25:53,381 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for 4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:25:53,382 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:25:53,382 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:25:53,382 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:25:53,382 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 
{event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:25:53,382 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:25:53,382 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:25:53,382 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for 4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:25:53,382 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for 4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:25:53,383 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:25:53,384 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for 4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:25:53,385 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:25:53,385 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:25:53,385 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 2024ec23f364bb1556c1fc453b0c56ea; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74881933, jitterRate=0.11582775413990021}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:25:53,385 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:25:53,385 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened 4516986afbb8ffd21b2bbfd62f2bd573; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71222347, jitterRate=0.06129567325115204}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:25:53,385 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4516986afbb8ffd21b2bbfd62f2bd573 
2024-11-23T08:25:53,386 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for 4516986afbb8ffd21b2bbfd62f2bd573: Running coprocessor pre-open hook at 1732350353379Writing region info on filesystem at 1732350353379Initializing all the Stores at 1732350353379Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350353379Cleaning up temporary data from old regions at 1732350353382 (+3 ms)Running coprocessor post-open hooks at 1732350353385 (+3 ms)Region opened successfully at 1732350353386 (+1 ms) 2024-11-23T08:25:53,386 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 2024ec23f364bb1556c1fc453b0c56ea: Running coprocessor pre-open hook at 1732350353379Writing region info on filesystem at 1732350353379Initializing all the Stores at 1732350353379Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350353379Cleaning up temporary data from old regions at 1732350353382 (+3 ms)Running coprocessor post-open hooks at 1732350353385 (+3 ms)Region opened successfully at 1732350353386 (+1 ms) 2024-11-23T08:25:53,387 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573., pid=147, masterSystemTime=1732350353375 2024-11-23T08:25:53,387 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea., pid=146, masterSystemTime=1732350353375 2024-11-23T08:25:53,389 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. 2024-11-23T08:25:53,389 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. 2024-11-23T08:25:53,389 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=4516986afbb8ffd21b2bbfd62f2bd573, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:25:53,390 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. 
2024-11-23T08:25:53,391 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. 2024-11-23T08:25:53,395 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=2024ec23f364bb1556c1fc453b0c56ea, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:25:53,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4516986afbb8ffd21b2bbfd62f2bd573, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:25:53,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2024ec23f364bb1556c1fc453b0c56ea, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:25:53,400 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=144 2024-11-23T08:25:53,400 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 4516986afbb8ffd21b2bbfd62f2bd573, server=b33d9013c60a,43417,1732350106962 in 174 msec 2024-11-23T08:25:53,401 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4516986afbb8ffd21b2bbfd62f2bd573, ASSIGN in 334 msec 2024-11-23T08:25:53,401 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=145 2024-11-23T08:25:53,401 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure 2024ec23f364bb1556c1fc453b0c56ea, server=b33d9013c60a,39649,1732350107049 in 176 msec 2024-11-23T08:25:53,403 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-11-23T08:25:53,403 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2024ec23f364bb1556c1fc453b0c56ea, ASSIGN in 335 msec 2024-11-23T08:25:53,404 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:25:53,404 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350353404"}]},"ts":"1732350353404"} 2024-11-23T08:25:53,406 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-11-23T08:25:53,407 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute 
state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:25:53,407 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-11-23T08:25:53,410 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-23T08:25:53,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:53,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:53,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:53,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:25:53,476 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:53,476 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:53,476 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:53,476 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:53,476 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:53,476 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:53,476 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:53,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 472 msec 2024-11-23T08:25:53,479 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-23T08:25:53,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-23T08:25:53,637 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T08:25:53,639 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:25:53,643 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:25:53,645 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-11-23T08:25:53,657 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [4516986afbb8ffd21b2bbfd62f2bd573, 2024ec23f364bb1556c1fc453b0c56ea] 2024-11-23T08:25:53,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[4516986afbb8ffd21b2bbfd62f2bd573, 2024ec23f364bb1556c1fc453b0c56ea], force=true 2024-11-23T08:25:53,663 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[4516986afbb8ffd21b2bbfd62f2bd573, 2024ec23f364bb1556c1fc453b0c56ea], force=true 2024-11-23T08:25:53,663 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[4516986afbb8ffd21b2bbfd62f2bd573, 2024ec23f364bb1556c1fc453b0c56ea], force=true 2024-11-23T08:25:53,663 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[4516986afbb8ffd21b2bbfd62f2bd573, 2024ec23f364bb1556c1fc453b0c56ea], force=true 2024-11-23T08:25:53,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T08:25:53,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4516986afbb8ffd21b2bbfd62f2bd573, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2024ec23f364bb1556c1fc453b0c56ea, UNASSIGN}] 2024-11-23T08:25:53,670 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4516986afbb8ffd21b2bbfd62f2bd573, UNASSIGN 2024-11-23T08:25:53,670 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2024ec23f364bb1556c1fc453b0c56ea, UNASSIGN 2024-11-23T08:25:53,670 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=2024ec23f364bb1556c1fc453b0c56ea, regionState=CLOSING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:25:53,670 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=4516986afbb8ffd21b2bbfd62f2bd573, regionState=CLOSING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:25:53,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4516986afbb8ffd21b2bbfd62f2bd573, UNASSIGN because future has completed 2024-11-23T08:25:53,672 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-23T08:25:53,672 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4516986afbb8ffd21b2bbfd62f2bd573, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:25:53,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2024ec23f364bb1556c1fc453b0c56ea, UNASSIGN because future has completed 2024-11-23T08:25:53,673 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-23T08:25:53,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 
2024ec23f364bb1556c1fc453b0c56ea, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:25:53,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T08:25:53,825 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close 4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:25:53,825 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-23T08:25:53,825 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing 4516986afbb8ffd21b2bbfd62f2bd573, disabling compactions & flushes 2024-11-23T08:25:53,825 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. 2024-11-23T08:25:53,825 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. 2024-11-23T08:25:53,825 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. after waiting 0 ms 2024-11-23T08:25:53,825 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. 2024-11-23T08:25:53,825 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing 4516986afbb8ffd21b2bbfd62f2bd573 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-23T08:25:53,825 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close 2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:25:53,826 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-23T08:25:53,826 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing 2024ec23f364bb1556c1fc453b0c56ea, disabling compactions & flushes 2024-11-23T08:25:53,826 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. 2024-11-23T08:25:53,826 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. 
2024-11-23T08:25:53,826 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. after waiting 0 ms 2024-11-23T08:25:53,826 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. 2024-11-23T08:25:53,826 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing 2024ec23f364bb1556c1fc453b0c56ea 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-23T08:25:53,839 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/.tmp/cf/909c5daa00ce453fa3741f73de3f67cf is 28, key is 2/cf:/1732350353644/Put/seqid=0 2024-11-23T08:25:53,839 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/.tmp/cf/997b446890f5460f9fa59fe26f1150d0 is 28, key is 1/cf:/1732350353640/Put/seqid=0 2024-11-23T08:25:53,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742163_1339 (size=4945) 2024-11-23T08:25:53,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742164_1340 (size=4945) 2024-11-23T08:25:53,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742164_1340 (size=4945) 2024-11-23T08:25:53,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742163_1339 (size=4945) 2024-11-23T08:25:53,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742163_1339 (size=4945) 2024-11-23T08:25:53,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742164_1340 (size=4945) 2024-11-23T08:25:53,848 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/.tmp/cf/997b446890f5460f9fa59fe26f1150d0 2024-11-23T08:25:53,848 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), 
to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/.tmp/cf/909c5daa00ce453fa3741f73de3f67cf 2024-11-23T08:25:53,852 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/.tmp/cf/909c5daa00ce453fa3741f73de3f67cf as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/cf/909c5daa00ce453fa3741f73de3f67cf 2024-11-23T08:25:53,852 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/.tmp/cf/997b446890f5460f9fa59fe26f1150d0 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/cf/997b446890f5460f9fa59fe26f1150d0 2024-11-23T08:25:53,856 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/cf/909c5daa00ce453fa3741f73de3f67cf, entries=1, sequenceid=5, filesize=4.8 K 2024-11-23T08:25:53,856 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/cf/997b446890f5460f9fa59fe26f1150d0, entries=1, sequenceid=5, filesize=4.8 K 2024-11-23T08:25:53,857 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 4516986afbb8ffd21b2bbfd62f2bd573 in 32ms, sequenceid=5, compaction requested=false 2024-11-23T08:25:53,857 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 2024ec23f364bb1556c1fc453b0c56ea in 31ms, sequenceid=5, compaction requested=false 2024-11-23T08:25:53,857 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-23T08:25:53,857 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-23T08:25:53,860 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): 
Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T08:25:53,861 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T08:25:53,861 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:25:53,861 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:25:53,861 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. 2024-11-23T08:25:53,861 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. 2024-11-23T08:25:53,861 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for 2024ec23f364bb1556c1fc453b0c56ea: Waiting for close lock at 1732350353826Running coprocessor pre-close hooks at 1732350353826Disabling compacts and flushes for region at 1732350353826Disabling writes for close at 1732350353826Obtaining lock to block concurrent updates at 1732350353826Preparing flush snapshotting stores in 2024ec23f364bb1556c1fc453b0c56ea at 1732350353826Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732350353826Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea. 
at 1732350353826Flushing 2024ec23f364bb1556c1fc453b0c56ea/cf: creating writer at 1732350353826Flushing 2024ec23f364bb1556c1fc453b0c56ea/cf: appending metadata at 1732350353839 (+13 ms)Flushing 2024ec23f364bb1556c1fc453b0c56ea/cf: closing flushed file at 1732350353839Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6121b76e: reopening flushed file at 1732350353852 (+13 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 2024ec23f364bb1556c1fc453b0c56ea in 31ms, sequenceid=5, compaction requested=false at 1732350353857 (+5 ms)Writing region close event to WAL at 1732350353858 (+1 ms)Running coprocessor post-close hooks at 1732350353861 (+3 ms)Closed at 1732350353861 2024-11-23T08:25:53,861 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for 4516986afbb8ffd21b2bbfd62f2bd573: Waiting for close lock at 1732350353825Running coprocessor pre-close hooks at 1732350353825Disabling compacts and flushes for region at 1732350353825Disabling writes for close at 1732350353825Obtaining lock to block concurrent updates at 1732350353825Preparing flush snapshotting stores in 4516986afbb8ffd21b2bbfd62f2bd573 at 1732350353825Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732350353826 (+1 ms)Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573. at 1732350353826Flushing 4516986afbb8ffd21b2bbfd62f2bd573/cf: creating writer at 1732350353826Flushing 4516986afbb8ffd21b2bbfd62f2bd573/cf: appending metadata at 1732350353839 (+13 ms)Flushing 4516986afbb8ffd21b2bbfd62f2bd573/cf: closing flushed file at 1732350353839Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31975e55: reopening flushed file at 1732350353852 (+13 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 4516986afbb8ffd21b2bbfd62f2bd573 in 32ms, sequenceid=5, compaction requested=false at 1732350353857 (+5 ms)Writing region close event to WAL at 1732350353858 (+1 ms)Running coprocessor post-close hooks at 1732350353861 (+3 ms)Closed at 1732350353861 2024-11-23T08:25:53,863 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed 4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:25:53,863 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=4516986afbb8ffd21b2bbfd62f2bd573, regionState=CLOSED 2024-11-23T08:25:53,864 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=2024ec23f364bb1556c1fc453b0c56ea, regionState=CLOSED 2024-11-23T08:25:53,864 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed 2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:25:53,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4516986afbb8ffd21b2bbfd62f2bd573, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:25:53,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure 
pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2024ec23f364bb1556c1fc453b0c56ea, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:25:53,870 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=149 2024-11-23T08:25:53,870 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure 4516986afbb8ffd21b2bbfd62f2bd573, server=b33d9013c60a,43417,1732350106962 in 196 msec 2024-11-23T08:25:53,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=150 2024-11-23T08:25:53,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure 2024ec23f364bb1556c1fc453b0c56ea, server=b33d9013c60a,39649,1732350107049 in 195 msec 2024-11-23T08:25:53,871 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4516986afbb8ffd21b2bbfd62f2bd573, UNASSIGN in 201 msec 2024-11-23T08:25:53,872 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=150, resume processing ppid=148 2024-11-23T08:25:53,872 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2024ec23f364bb1556c1fc453b0c56ea, UNASSIGN in 201 msec 2024-11-23T08:25:53,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742165_1341 (size=84) 2024-11-23T08:25:53,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742165_1341 (size=84) 2024-11-23T08:25:53,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742165_1341 (size=84) 2024-11-23T08:25:53,884 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:25:53,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742166_1342 (size=20) 2024-11-23T08:25:53,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742166_1342 (size=20) 2024-11-23T08:25:53,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742166_1342 (size=20) 2024-11-23T08:25:53,891 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:25:53,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742167_1343 (size=21) 2024-11-23T08:25:53,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742167_1343 (size=21) 2024-11-23T08:25:53,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742167_1343 (size=21) 2024-11-23T08:25:53,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742168_1344 (size=84) 2024-11-23T08:25:53,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742168_1344 (size=84) 2024-11-23T08:25:53,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742168_1344 (size=84) 2024-11-23T08:25:53,901 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:25:53,909 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-11-23T08:25:53,911 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353003.4516986afbb8ffd21b2bbfd62f2bd573.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-23T08:25:53,911 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732350353003.2024ec23f364bb1556c1fc453b0c56ea.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-23T08:25:53,911 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-23T08:25:53,916 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=53c5e9809ad23e100af624566823930a, ASSIGN}] 2024-11-23T08:25:53,916 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=53c5e9809ad23e100af624566823930a, ASSIGN 2024-11-23T08:25:53,917 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=53c5e9809ad23e100af624566823930a, ASSIGN; state=MERGED, location=b33d9013c60a,43417,1732350106962; forceNewPlan=false, retain=false 2024-11-23T08:25:53,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T08:25:54,068 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-23T08:25:54,068 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=53c5e9809ad23e100af624566823930a, regionState=OPENING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:25:54,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=53c5e9809ad23e100af624566823930a, ASSIGN because future has completed 2024-11-23T08:25:54,070 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 53c5e9809ad23e100af624566823930a, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:25:54,226 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a. 2024-11-23T08:25:54,226 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => 53c5e9809ad23e100af624566823930a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a.', STARTKEY => '', ENDKEY => ''} 2024-11-23T08:25:54,226 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a. service=AccessControlService 2024-11-23T08:25:54,226 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
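The entries above record MergeTableRegionsProcedure pid=148 (force=true) unassigning regions 4516986afbb8ffd21b2bbfd62f2bd573 and 2024ec23f364bb1556c1fc453b0c56ea and assigning the merged region 53c5e9809ad23e100af624566823930a. As a hedged, minimal client-side sketch (not taken from this test's source), such a forced merge can be triggered through the public Admin API; the table name is copied from the log, while the region lookup by list index is only a placeholder for however the caller identifies the two adjacent regions.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MergeRegionsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table =
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Look up the two adjacent regions to merge; in the log these are
      // 4516986afbb8ffd21b2bbfd62f2bd573 and 2024ec23f364bb1556c1fc453b0c56ea.
      List<RegionInfo> regions = admin.getRegions(table);
      byte[][] toMerge = new byte[][] {
          regions.get(0).getEncodedNameAsBytes(),
          regions.get(1).getEncodedNameAsBytes()
      };
      // force=true mirrors "MergeTableRegionsProcedure ... force=true" above;
      // the returned future completes when the master-side procedure finishes.
      admin.mergeRegionsAsync(toMerge, true).get();
    }
  }
}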
2024-11-23T08:25:54,226 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,226 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:25:54,227 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for 53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,227 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for 53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,228 INFO [StoreOpener-53c5e9809ad23e100af624566823930a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,230 INFO [StoreOpener-53c5e9809ad23e100af624566823930a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 53c5e9809ad23e100af624566823930a columnFamilyName cf 2024-11-23T08:25:54,230 DEBUG [StoreOpener-53c5e9809ad23e100af624566823930a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:25:54,241 DEBUG [StoreOpener-53c5e9809ad23e100af624566823930a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/cf/909c5daa00ce453fa3741f73de3f67cf.2024ec23f364bb1556c1fc453b0c56ea->hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/cf/909c5daa00ce453fa3741f73de3f67cf-top 2024-11-23T08:25:54,246 DEBUG [StoreOpener-53c5e9809ad23e100af624566823930a-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/cf/997b446890f5460f9fa59fe26f1150d0.4516986afbb8ffd21b2bbfd62f2bd573->hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/cf/997b446890f5460f9fa59fe26f1150d0-top 2024-11-23T08:25:54,247 INFO [StoreOpener-53c5e9809ad23e100af624566823930a-1 {}] regionserver.HStore(327): Store=53c5e9809ad23e100af624566823930a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:25:54,247 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for 53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,248 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,249 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,250 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for 53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,250 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for 53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,251 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for 53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,252 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened 53c5e9809ad23e100af624566823930a; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72514525, jitterRate=0.08055062592029572}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:25:54,252 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,252 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for 53c5e9809ad23e100af624566823930a: Running coprocessor pre-open hook at 1732350354227Writing region info on filesystem at 1732350354227Initializing all the Stores at 1732350354228 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350354228Cleaning up temporary data from old regions at 1732350354250 (+22 ms)Running coprocessor post-open hooks at 1732350354252 (+2 ms)Region opened successfully at 1732350354252 2024-11-23T08:25:54,253 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a., pid=154, masterSystemTime=1732350354222 2024-11-23T08:25:54,253 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a.,because compaction is disabled. 2024-11-23T08:25:54,255 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a. 2024-11-23T08:25:54,255 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a. 2024-11-23T08:25:54,259 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=53c5e9809ad23e100af624566823930a, regionState=OPEN, openSeqNum=9, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:25:54,261 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=b33d9013c60a,43417,1732350106962, table=testtb-testExportFileSystemStateWithMergeRegion-1, region=53c5e9809ad23e100af624566823930a. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-23T08:25:54,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 53c5e9809ad23e100af624566823930a, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:25:54,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-11-23T08:25:54,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure 53c5e9809ad23e100af624566823930a, server=b33d9013c60a,43417,1732350106962 in 192 msec 2024-11-23T08:25:54,267 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-11-23T08:25:54,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=53c5e9809ad23e100af624566823930a, ASSIGN in 349 msec 2024-11-23T08:25:54,269 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[4516986afbb8ffd21b2bbfd62f2bd573, 2024ec23f364bb1556c1fc453b0c56ea], force=true in 609 msec 2024-11-23T08:25:54,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T08:25:54,288 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T08:25:54,288 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-23T08:25:54,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350354288 (current time:1732350354288). 
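The snapshot request logged above ({ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }) corresponds to a plain Admin#snapshot call, which for an enabled table produces a FLUSH-type snapshot. A minimal sketch, assuming default client configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure (pid=155 in the log)
      // has written and verified the snapshot manifest.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
    }
  }
}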
2024-11-23T08:25:54,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:25:54,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-11-23T08:25:54,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:25:54,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68edd9d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:54,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:25:54,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:25:54,290 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:25:54,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:25:54,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:25:54,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a1ebb6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:54,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:25:54,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:25:54,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:54,292 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50594, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:25:54,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29dac1d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:54,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:25:54,293 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:25:54,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:54,295 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37572, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:25:54,296 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 2024-11-23T08:25:54,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:25:54,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:54,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:54,297 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T08:25:54,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f498dbb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:54,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:25:54,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:25:54,298 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:25:54,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:25:54,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:25:54,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d3fbb88, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:54,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:25:54,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:25:54,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:54,300 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50606, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:25:54,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bc7e6d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:25:54,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:25:54,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:25:54,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:54,305 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37586, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T08:25:54,307 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:25:54,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:25:54,309 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37432, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:25:54,311 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 2024-11-23T08:25:54,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:25:54,311 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:54,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:25:54,311 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:25:54,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-23T08:25:54,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:25:54,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-23T08:25:54,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-11-23T08:25:54,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-23T08:25:54,314 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:25:54,316 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:25:54,318 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:25:54,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742169_1345 (size=216) 2024-11-23T08:25:54,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742169_1345 (size=216) 2024-11-23T08:25:54,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742169_1345 (size=216) 2024-11-23T08:25:54,333 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:25:54,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 53c5e9809ad23e100af624566823930a}] 2024-11-23T08:25:54,334 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-23T08:25:54,486 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-11-23T08:25:54,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a. 2024-11-23T08:25:54,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for 53c5e9809ad23e100af624566823930a: 2024-11-23T08:25:54,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-11-23T08:25:54,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:25:54,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:25:54,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/cf/909c5daa00ce453fa3741f73de3f67cf.2024ec23f364bb1556c1fc453b0c56ea->hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/cf/909c5daa00ce453fa3741f73de3f67cf-top, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/cf/997b446890f5460f9fa59fe26f1150d0.4516986afbb8ffd21b2bbfd62f2bd573->hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/cf/997b446890f5460f9fa59fe26f1150d0-top] hfiles 2024-11-23T08:25:54,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/cf/909c5daa00ce453fa3741f73de3f67cf.2024ec23f364bb1556c1fc453b0c56ea for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:25:54,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/cf/997b446890f5460f9fa59fe26f1150d0.4516986afbb8ffd21b2bbfd62f2bd573 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:25:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742170_1346 (size=269) 2024-11-23T08:25:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742170_1346 (size=269) 2024-11-23T08:25:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742170_1346 (size=269) 2024-11-23T08:25:54,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a. 
2024-11-23T08:25:54,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-11-23T08:25:54,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-11-23T08:25:54,506 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,506 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 53c5e9809ad23e100af624566823930a 2024-11-23T08:25:54,509 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-11-23T08:25:54,509 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:25:54,509 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 53c5e9809ad23e100af624566823930a in 174 msec 2024-11-23T08:25:54,510 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:25:54,511 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:25:54,511 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:25:54,512 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:25:54,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742171_1347 (size=670) 2024-11-23T08:25:54,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742171_1347 (size=670) 2024-11-23T08:25:54,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742171_1347 (size=670) 2024-11-23T08:25:54,527 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:25:54,533 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:25:54,533 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:25:54,535 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:25:54,535 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-11-23T08:25:54,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 223 msec 2024-11-23T08:25:54,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-23T08:25:54,628 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T08:25:54,628 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350354628 2024-11-23T08:25:54,628 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34105, tgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350354628, rawTgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350354628, srcFsUri=hdfs://localhost:34105, srcDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:25:54,655 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34105, inputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:25:54,655 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350354628, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350354628/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:25:54,657 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T08:25:54,662 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350354628/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:25:54,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742173_1349 (size=670) 2024-11-23T08:25:54,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742173_1349 (size=670) 2024-11-23T08:25:54,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742173_1349 (size=670) 2024-11-23T08:25:54,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742172_1348 (size=216) 2024-11-23T08:25:54,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742172_1348 (size=216) 2024-11-23T08:25:54,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742172_1348 (size=216) 2024-11-23T08:25:54,673 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:54,673 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:54,673 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:55,510 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0006_000001 (auth:SIMPLE) from 127.0.0.1:57010 2024-11-23T08:25:55,518 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0006/container_1732350113698_0006_01_000001/launch_container.sh] 2024-11-23T08:25:55,518 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): 
delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0006/container_1732350113698_0006_01_000001/container_tokens] 2024-11-23T08:25:55,518 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0006/container_1732350113698_0006_01_000001/sysfs] 2024-11-23T08:25:55,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-887051034927603490.jar 2024-11-23T08:25:55,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:55,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:55,774 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-13269464455697800640.jar 2024-11-23T08:25:55,774 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:55,774 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:55,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:55,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:55,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:55,775 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:25:55,776 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T08:25:55,776 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T08:25:55,776 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T08:25:55,776 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T08:25:55,777 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T08:25:55,777 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T08:25:55,777 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T08:25:55,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T08:25:55,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T08:25:55,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T08:25:55,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T08:25:55,779 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:25:55,779 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:25:55,779 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:25:55,779 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:25:55,779 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:25:55,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:25:55,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:25:55,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742174_1350 (size=131440) 2024-11-23T08:25:55,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742174_1350 (size=131440) 2024-11-23T08:25:55,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742174_1350 (size=131440) 2024-11-23T08:25:55,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742175_1351 (size=4188619) 2024-11-23T08:25:55,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742175_1351 (size=4188619) 2024-11-23T08:25:55,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742175_1351 (size=4188619) 2024-11-23T08:25:55,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742176_1352 (size=1323991) 
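The "For class X, using jar Y" records above are TableMapReduceUtil resolving each dependency class to the jar that contains it and staging those jars for the MapReduce job; the addStoredBlock records that follow are simply those jars landing on the test HDFS. A minimal client-side sketch of the call that triggers this, assuming a plain HBaseConfiguration and a hypothetical job name rather than the mini-cluster setup used by this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class AddDependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();            // the test instead uses its mini-cluster conf
    Job job = Job.getInstance(conf, "export-snapshot-sketch");   // hypothetical job name

    // Resolves the jar containing each class the job needs (HConstants, ClientProtos,
    // shaded protobuf/netty, zookeeper, ...) and adds it to the job's distributed
    // cache; this is what produces the "For class X, using jar Y" DEBUG lines and
    // the subsequent block reports as the jars are copied to HDFS.
    TableMapReduceUtil.addDependencyJars(job);
  }
}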
2024-11-23T08:25:55,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742176_1352 (size=1323991) 2024-11-23T08:25:55,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742176_1352 (size=1323991) 2024-11-23T08:25:55,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742177_1353 (size=903733) 2024-11-23T08:25:55,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742177_1353 (size=903733) 2024-11-23T08:25:55,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742177_1353 (size=903733) 2024-11-23T08:25:55,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742178_1354 (size=8360083) 2024-11-23T08:25:55,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742178_1354 (size=8360083) 2024-11-23T08:25:55,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742178_1354 (size=8360083) 2024-11-23T08:25:56,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742179_1355 (size=1877034) 2024-11-23T08:25:56,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742179_1355 (size=1877034) 2024-11-23T08:25:56,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742179_1355 (size=1877034) 2024-11-23T08:25:56,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742180_1356 (size=77835) 2024-11-23T08:25:56,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742180_1356 (size=77835) 2024-11-23T08:25:56,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742180_1356 (size=77835) 2024-11-23T08:25:56,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742181_1357 (size=30949) 2024-11-23T08:25:56,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742181_1357 (size=30949) 2024-11-23T08:25:56,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742181_1357 (size=30949) 2024-11-23T08:25:56,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742182_1358 (size=1597347) 2024-11-23T08:25:56,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742182_1358 (size=1597347) 2024-11-23T08:25:56,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742182_1358 
(size=1597347) 2024-11-23T08:25:56,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742183_1359 (size=4695811) 2024-11-23T08:25:56,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742183_1359 (size=4695811) 2024-11-23T08:25:56,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742183_1359 (size=4695811) 2024-11-23T08:25:56,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742184_1360 (size=232957) 2024-11-23T08:25:56,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742184_1360 (size=232957) 2024-11-23T08:25:56,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742184_1360 (size=232957) 2024-11-23T08:25:56,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742185_1361 (size=127628) 2024-11-23T08:25:56,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742185_1361 (size=127628) 2024-11-23T08:25:56,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742185_1361 (size=127628) 2024-11-23T08:25:56,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742186_1362 (size=20406) 2024-11-23T08:25:56,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742186_1362 (size=20406) 2024-11-23T08:25:56,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742186_1362 (size=20406) 2024-11-23T08:25:56,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742187_1363 (size=6424742) 2024-11-23T08:25:56,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742187_1363 (size=6424742) 2024-11-23T08:25:56,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742187_1363 (size=6424742) 2024-11-23T08:25:56,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:25:56,247 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-11-23T08:25:56,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:25:56,248 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-11-23T08:25:56,249 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-23T08:25:56,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742188_1364 (size=5175431) 2024-11-23T08:25:56,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742188_1364 (size=5175431) 2024-11-23T08:25:56,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742188_1364 (size=5175431) 2024-11-23T08:25:56,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742189_1365 (size=217634) 2024-11-23T08:25:56,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742189_1365 (size=217634) 2024-11-23T08:25:56,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742189_1365 (size=217634) 2024-11-23T08:25:56,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742190_1366 (size=1832290) 2024-11-23T08:25:56,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742190_1366 (size=1832290) 2024-11-23T08:25:56,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742190_1366 (size=1832290) 2024-11-23T08:25:56,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742191_1367 (size=322274) 2024-11-23T08:25:56,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742191_1367 (size=322274) 2024-11-23T08:25:56,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742191_1367 (size=322274) 2024-11-23T08:25:56,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742192_1368 (size=503880) 2024-11-23T08:25:56,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742192_1368 (size=503880) 2024-11-23T08:25:56,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742192_1368 (size=503880) 2024-11-23T08:25:56,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742193_1369 (size=440957) 2024-11-23T08:25:56,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742193_1369 (size=440957) 2024-11-23T08:25:56,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to 
blk_1073742193_1369 (size=440957) 2024-11-23T08:25:56,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742194_1370 (size=29229) 2024-11-23T08:25:56,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742194_1370 (size=29229) 2024-11-23T08:25:56,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742194_1370 (size=29229) 2024-11-23T08:25:56,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742195_1371 (size=24096) 2024-11-23T08:25:56,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742195_1371 (size=24096) 2024-11-23T08:25:56,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742195_1371 (size=24096) 2024-11-23T08:25:56,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742196_1372 (size=111872) 2024-11-23T08:25:56,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742196_1372 (size=111872) 2024-11-23T08:25:56,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742196_1372 (size=111872) 2024-11-23T08:25:56,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742197_1373 (size=45609) 2024-11-23T08:25:56,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742197_1373 (size=45609) 2024-11-23T08:25:56,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742197_1373 (size=45609) 2024-11-23T08:25:56,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742198_1374 (size=136454) 2024-11-23T08:25:56,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742198_1374 (size=136454) 2024-11-23T08:25:56,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742198_1374 (size=136454) 2024-11-23T08:25:56,676 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
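The JobResourceUploader warning just above appears because the submitted job has no job jar configured, so user classes travel only via the dependency jars added earlier. In standalone user code (not necessarily what this test intends), the remedy the warning itself points at is to set the job jar; a short sketch, where ExportSnapshot is used only as an illustrative class living in the desired jar:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.mapreduce.Job;

public class SetJobJarSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "set-job-jar-sketch");  // hypothetical job name

    // Either call silences the "No job jar file set" warning:
    job.setJarByClass(ExportSnapshot.class);   // locate the jar from a class it contains
    // job.setJar("/path/to/my-job.jar");      // or give an explicit jar path
  }
}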
2024-11-23T08:25:56,681 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-11-23T08:25:56,684 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-11-23T08:25:56,684 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-11-23T08:25:56,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742199_1375 (size=481) 2024-11-23T08:25:56,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742199_1375 (size=481) 2024-11-23T08:25:56,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742199_1375 (size=481) 2024-11-23T08:25:56,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742200_1376 (size=21) 2024-11-23T08:25:56,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742200_1376 (size=21) 2024-11-23T08:25:56,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742200_1376 (size=21) 2024-11-23T08:25:56,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742201_1377 (size=304057) 2024-11-23T08:25:56,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742201_1377 (size=304057) 2024-11-23T08:25:56,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742201_1377 (size=304057) 2024-11-23T08:25:56,941 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:25:57,187 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T08:25:57,187 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T08:25:57,394 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0007_000001 (auth:SIMPLE) from 127.0.0.1:55666 2024-11-23T08:26:03,190 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0007_000001 (auth:SIMPLE) from 127.0.0.1:59874 2024-11-23T08:26:03,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742202_1378 (size=349755) 2024-11-23T08:26:03,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742202_1378 (size=349755) 2024-11-23T08:26:03,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742202_1378 (size=349755) 2024-11-23T08:26:05,714 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0007_000001 (auth:SIMPLE) from 127.0.0.1:40462 2024-11-23T08:26:05,714 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0007_000001 (auth:SIMPLE) from 127.0.0.1:47946 2024-11-23T08:26:09,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742203_1379 (size=4945) 2024-11-23T08:26:09,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742203_1379 (size=4945) 2024-11-23T08:26:09,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742203_1379 (size=4945) 2024-11-23T08:26:10,138 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_0/usercache/jenkins/appcache/application_1732350113698_0007/container_1732350113698_0007_01_000002/launch_container.sh] 2024-11-23T08:26:10,138 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_0/usercache/jenkins/appcache/application_1732350113698_0007/container_1732350113698_0007_01_000002/container_tokens] 2024-11-23T08:26:10,138 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_0/usercache/jenkins/appcache/application_1732350113698_0007/container_1732350113698_0007_01_000002/sysfs] 2024-11-23T08:26:11,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742205_1381 (size=4945) 2024-11-23T08:26:11,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742205_1381 (size=4945) 2024-11-23T08:26:11,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742205_1381 (size=4945) 2024-11-23T08:26:11,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742204_1380 (size=22246) 2024-11-23T08:26:11,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742204_1380 (size=22246) 2024-11-23T08:26:11,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742204_1380 (size=22246) 2024-11-23T08:26:11,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742206_1382 (size=482) 2024-11-23T08:26:11,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742206_1382 (size=482) 2024-11-23T08:26:11,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742206_1382 (size=482) 2024-11-23T08:26:11,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742207_1383 (size=22246) 2024-11-23T08:26:11,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742207_1383 (size=22246) 2024-11-23T08:26:11,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742207_1383 (size=22246) 2024-11-23T08:26:11,174 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0007/container_1732350113698_0007_01_000003/launch_container.sh] 2024-11-23T08:26:11,174 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0007/container_1732350113698_0007_01_000003/container_tokens] 2024-11-23T08:26:11,174 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0007/container_1732350113698_0007_01_000003/sysfs] 2024-11-23T08:26:11,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742208_1384 (size=349755) 2024-11-23T08:26:11,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742208_1384 (size=349755) 2024-11-23T08:26:11,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742208_1384 (size=349755) 2024-11-23T08:26:11,191 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth 
successful for appattempt_1732350113698_0007_000001 (auth:SIMPLE) from 127.0.0.1:47956 2024-11-23T08:26:12,608 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T08:26:12,772 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T08:26:12,780 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:12,780 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T08:26:12,780 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T08:26:12,780 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:12,781 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-23T08:26:12,781 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-23T08:26:12,781 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350354628/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350354628/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:12,781 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350354628/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-23T08:26:12,781 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350354628/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-23T08:26:12,789 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:12,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:12,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-23T08:26:12,793 DEBUG [PEWorker-4 {}] 
hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350372792"}]},"ts":"1732350372792"} 2024-11-23T08:26:12,795 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-11-23T08:26:12,795 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-11-23T08:26:12,796 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-11-23T08:26:12,799 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=53c5e9809ad23e100af624566823930a, UNASSIGN}] 2024-11-23T08:26:12,801 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=53c5e9809ad23e100af624566823930a, UNASSIGN 2024-11-23T08:26:12,802 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=53c5e9809ad23e100af624566823930a, regionState=CLOSING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:12,805 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=53c5e9809ad23e100af624566823930a, UNASSIGN because future has completed 2024-11-23T08:26:12,805 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:26:12,805 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 53c5e9809ad23e100af624566823930a, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:26:12,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-23T08:26:12,958 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close 53c5e9809ad23e100af624566823930a 2024-11-23T08:26:12,958 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:26:12,958 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing 53c5e9809ad23e100af624566823930a, disabling compactions & flushes 2024-11-23T08:26:12,958 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a. 
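The DisableTableProcedure records above (pid=157) and the region close and DeleteTableProcedure that follow are the master-side effect of an ordinary client request: the test simply disables and then deletes the table through the Admin API. A minimal client-side sketch, assuming a freshly created connection (only the table name is taken from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");

      // Client-side view of the DISABLE and DELETE procedures in this log: the master
      // closes the region(s), marks the table DISABLED in hbase:meta, then archives the
      // region directories and removes the table descriptor.
      if (!admin.isTableDisabled(tn)) {
        admin.disableTable(tn);
      }
      admin.deleteTable(tn);
    }
  }
}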
2024-11-23T08:26:12,958 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a. 2024-11-23T08:26:12,958 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a. after waiting 0 ms 2024-11-23T08:26:12,958 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a. 2024-11-23T08:26:12,962 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-11-23T08:26:12,963 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:26:12,963 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a. 2024-11-23T08:26:12,963 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for 53c5e9809ad23e100af624566823930a: Waiting for close lock at 1732350372958Running coprocessor pre-close hooks at 1732350372958Disabling compacts and flushes for region at 1732350372958Disabling writes for close at 1732350372958Writing region close event to WAL at 1732350372959 (+1 ms)Running coprocessor post-close hooks at 1732350372963 (+4 ms)Closed at 1732350372963 2024-11-23T08:26:12,965 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed 53c5e9809ad23e100af624566823930a 2024-11-23T08:26:12,966 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=53c5e9809ad23e100af624566823930a, regionState=CLOSED 2024-11-23T08:26:12,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 53c5e9809ad23e100af624566823930a, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:26:12,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-11-23T08:26:12,970 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure 53c5e9809ad23e100af624566823930a, server=b33d9013c60a,43417,1732350106962 in 163 msec 2024-11-23T08:26:12,971 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-11-23T08:26:12,971 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, 
state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=53c5e9809ad23e100af624566823930a, UNASSIGN in 170 msec 2024-11-23T08:26:12,973 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-11-23T08:26:12,973 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 175 msec 2024-11-23T08:26:12,974 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350372974"}]},"ts":"1732350372974"} 2024-11-23T08:26:12,975 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-11-23T08:26:12,975 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-11-23T08:26:12,977 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 187 msec 2024-11-23T08:26:13,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-23T08:26:13,107 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T08:26:13,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,110 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,110 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,113 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,113 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a 2024-11-23T08:26:13,113 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:26:13,113 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:26:13,114 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/recovered.edits] 2024-11-23T08:26:13,114 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/recovered.edits] 2024-11-23T08:26:13,114 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/recovered.edits] 2024-11-23T08:26:13,117 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/cf/997b446890f5460f9fa59fe26f1150d0 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/cf/997b446890f5460f9fa59fe26f1150d0 2024-11-23T08:26:13,117 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/cf/909c5daa00ce453fa3741f73de3f67cf.2024ec23f364bb1556c1fc453b0c56ea to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/cf/909c5daa00ce453fa3741f73de3f67cf.2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:26:13,117 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/cf/909c5daa00ce453fa3741f73de3f67cf to 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/cf/909c5daa00ce453fa3741f73de3f67cf 2024-11-23T08:26:13,118 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/cf/997b446890f5460f9fa59fe26f1150d0.4516986afbb8ffd21b2bbfd62f2bd573 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/cf/997b446890f5460f9fa59fe26f1150d0.4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:26:13,120 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/recovered.edits/8.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea/recovered.edits/8.seqid 2024-11-23T08:26:13,120 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/recovered.edits/8.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573/recovered.edits/8.seqid 2024-11-23T08:26:13,120 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4516986afbb8ffd21b2bbfd62f2bd573 2024-11-23T08:26:13,120 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2024ec23f364bb1556c1fc453b0c56ea 2024-11-23T08:26:13,121 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/recovered.edits/12.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a/recovered.edits/12.seqid 2024-11-23T08:26:13,121 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/53c5e9809ad23e100af624566823930a 2024-11-23T08:26:13,121 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-11-23T08:26:13,123 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; 
DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,125 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-11-23T08:26:13,127 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-11-23T08:26:13,128 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,128 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-11-23T08:26:13,129 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350373128"}]},"ts":"9223372036854775807"} 2024-11-23T08:26:13,131 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-23T08:26:13,131 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 53c5e9809ad23e100af624566823930a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T08:26:13,131 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-11-23T08:26:13,131 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732350373131"}]},"ts":"9223372036854775807"} 2024-11-23T08:26:13,133 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-11-23T08:26:13,133 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,134 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 25 msec 2024-11-23T08:26:13,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,135 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,136 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-23T08:26:13,136 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-23T08:26:13,136 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-23T08:26:13,136 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-23T08:26:13,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:13,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:13,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:13,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:13,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-11-23T08:26:13,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions 
cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:13,148 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,148 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T08:26:13,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:13,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:13,148 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:13,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-23T08:26:13,151 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350373151"}]},"ts":"1732350373151"} 2024-11-23T08:26:13,152 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-11-23T08:26:13,152 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-11-23T08:26:13,153 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-11-23T08:26:13,154 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=86145634754ede6b03c905ef165a7779, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion, region=55ba5ccb42b30ceea0f18f37a65619c6, UNASSIGN}] 2024-11-23T08:26:13,155 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=55ba5ccb42b30ceea0f18f37a65619c6, UNASSIGN 2024-11-23T08:26:13,155 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=86145634754ede6b03c905ef165a7779, UNASSIGN 2024-11-23T08:26:13,155 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=55ba5ccb42b30ceea0f18f37a65619c6, regionState=CLOSING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:26:13,155 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=86145634754ede6b03c905ef165a7779, regionState=CLOSING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:13,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=55ba5ccb42b30ceea0f18f37a65619c6, UNASSIGN because future has completed 2024-11-23T08:26:13,157 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:26:13,157 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:26:13,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=86145634754ede6b03c905ef165a7779, UNASSIGN because future has completed 2024-11-23T08:26:13,157 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:26:13,158 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 86145634754ede6b03c905ef165a7779, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:26:13,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-23T08:26:13,309 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:26:13,309 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:26:13,309 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing 55ba5ccb42b30ceea0f18f37a65619c6, disabling compactions & 
flushes 2024-11-23T08:26:13,309 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 2024-11-23T08:26:13,309 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 2024-11-23T08:26:13,309 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. after waiting 0 ms 2024-11-23T08:26:13,309 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 2024-11-23T08:26:13,310 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close 86145634754ede6b03c905ef165a7779 2024-11-23T08:26:13,310 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:26:13,310 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing 86145634754ede6b03c905ef165a7779, disabling compactions & flushes 2024-11-23T08:26:13,310 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 2024-11-23T08:26:13,310 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 2024-11-23T08:26:13,310 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. after waiting 0 ms 2024-11-23T08:26:13,310 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 
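The entries above show the master-side DisableTableProcedure (pid=162) fanning out into a CloseTableRegionsProcedure and per-region close work on the region servers. From the client's point of view, that whole sequence is normally driven by a single Admin call that blocks until the procedure finishes. A minimal sketch of that call, reusing the table name from the log; the class name and connection setup are illustrative assumptions, not the test's actual code, and a reachable cluster configuration (hbase-site.xml) is assumed on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml (ZooKeeper quorum etc.) is available on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      // Blocks until the master's DisableTableProcedure, including the region
      // close subprocedures traced in the log, has completed.
      admin.disableTable(table);
    }
  }
}
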
2024-11-23T08:26:13,313 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:26:13,313 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:26:13,314 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:26:13,314 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:26:13,314 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779. 2024-11-23T08:26:13,314 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6. 2024-11-23T08:26:13,314 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for 86145634754ede6b03c905ef165a7779: Waiting for close lock at 1732350373310Running coprocessor pre-close hooks at 1732350373310Disabling compacts and flushes for region at 1732350373310Disabling writes for close at 1732350373310Writing region close event to WAL at 1732350373311 (+1 ms)Running coprocessor post-close hooks at 1732350373314 (+3 ms)Closed at 1732350373314 2024-11-23T08:26:13,314 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for 55ba5ccb42b30ceea0f18f37a65619c6: Waiting for close lock at 1732350373309Running coprocessor pre-close hooks at 1732350373309Disabling compacts and flushes for region at 1732350373309Disabling writes for close at 1732350373309Writing region close event to WAL at 1732350373310 (+1 ms)Running coprocessor post-close hooks at 1732350373314 (+4 ms)Closed at 1732350373314 2024-11-23T08:26:13,315 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed 55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:26:13,316 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=55ba5ccb42b30ceea0f18f37a65619c6, regionState=CLOSED 2024-11-23T08:26:13,316 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed 86145634754ede6b03c905ef165a7779 2024-11-23T08:26:13,316 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=86145634754ede6b03c905ef165a7779, regionState=CLOSED 2024-11-23T08:26:13,317 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:26:13,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 86145634754ede6b03c905ef165a7779, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:26:13,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-11-23T08:26:13,320 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=164 2024-11-23T08:26:13,320 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure 86145634754ede6b03c905ef165a7779, server=b33d9013c60a,43417,1732350106962 in 160 msec 2024-11-23T08:26:13,320 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; CloseRegionProcedure 55ba5ccb42b30ceea0f18f37a65619c6, server=b33d9013c60a,39649,1732350107049 in 161 msec 2024-11-23T08:26:13,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=55ba5ccb42b30ceea0f18f37a65619c6, UNASSIGN in 165 msec 2024-11-23T08:26:13,321 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=164, resume processing ppid=163 2024-11-23T08:26:13,321 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=86145634754ede6b03c905ef165a7779, UNASSIGN in 166 msec 2024-11-23T08:26:13,323 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-11-23T08:26:13,323 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 168 msec 2024-11-23T08:26:13,324 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350373323"}]},"ts":"1732350373323"} 2024-11-23T08:26:13,325 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-11-23T08:26:13,325 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-11-23T08:26:13,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 177 msec 2024-11-23T08:26:13,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-23T08:26:13,467 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T08:26:13,468 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,469 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,470 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,472 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,474 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:26:13,474 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779 2024-11-23T08:26:13,476 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/recovered.edits] 2024-11-23T08:26:13,476 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/recovered.edits] 2024-11-23T08:26:13,482 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/cf/a21eff376c444bd6a51475ff98cacd2a to 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/cf/a21eff376c444bd6a51475ff98cacd2a 2024-11-23T08:26:13,482 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/cf/aaaddacfcc694ef0aa0e0b044863d5cb to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/cf/aaaddacfcc694ef0aa0e0b044863d5cb 2024-11-23T08:26:13,484 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779/recovered.edits/9.seqid 2024-11-23T08:26:13,484 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6/recovered.edits/9.seqid 2024-11-23T08:26:13,484 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/86145634754ede6b03c905ef165a7779 2024-11-23T08:26:13,485 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithMergeRegion/55ba5ccb42b30ceea0f18f37a65619c6 2024-11-23T08:26:13,485 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-11-23T08:26:13,486 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,489 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-11-23T08:26:13,491 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-11-23T08:26:13,493 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,493 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
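Once the table is disabled, the DeleteTableProcedure above (pid=168) does not remove store files outright: the HFileArchiver entries show each region's cf and recovered.edits files being moved from data/default/... to the parallel archive/data/default/... location before the region directories are deleted and the table is dropped from hbase:meta. A minimal client-side sketch of triggering that procedure; as before, the class name and connection setup are illustrative assumptions rather than the test's actual code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumes a reachable cluster configuration on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      // deleteTable requires the table to be disabled first; the store files end
      // up under the archive directory (as logged above) rather than being removed.
      if (admin.isTableDisabled(table)) {
        admin.deleteTable(table);
      }
    }
  }
}
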
2024-11-23T08:26:13,493 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350373493"}]},"ts":"9223372036854775807"} 2024-11-23T08:26:13,493 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350373493"}]},"ts":"9223372036854775807"} 2024-11-23T08:26:13,495 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T08:26:13,495 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 86145634754ede6b03c905ef165a7779, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732350351658.86145634754ede6b03c905ef165a7779.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 55ba5ccb42b30ceea0f18f37a65619c6, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732350351658.55ba5ccb42b30ceea0f18f37a65619c6.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T08:26:13,495 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-11-23T08:26:13,496 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732350373495"}]},"ts":"9223372036854775807"} 2024-11-23T08:26:13,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-23T08:26:13,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-23T08:26:13,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-23T08:26:13,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions 
cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-23T08:26:13,499 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-11-23T08:26:13,500 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,502 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 32 msec 2024-11-23T08:26:13,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:13,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:13,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:13,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:13,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-23T08:26:13,507 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,507 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T08:26:13,515 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-23T08:26:13,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,518 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-23T08:26:13,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:13,522 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-11-23T08:26:13,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:13,547 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=817 (was 808) Potentially hanging thread: Thread-5872 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 132349) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:38366 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:44680 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1403869411_1 at /127.0.0.1:45634 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:38251 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:33222 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38251 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1403869411_1 at /127.0.0.1:42990 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=819 (was 811) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=773 (was 750) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 16) - ProcessCount LEAK? -, AvailableMemoryMB=2486 (was 2913) 2024-11-23T08:26:13,547 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=817 is superior to 500 2024-11-23T08:26:13,564 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=817, OpenFileDescriptor=819, MaxFileDescriptor=1048576, SystemLoadAverage=773, ProcessCount=19, AvailableMemoryMB=2486 2024-11-23T08:26:13,564 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=817 is superior to 500 2024-11-23T08:26:13,566 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:26:13,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T08:26:13,568 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:26:13,568 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:13,568 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-11-23T08:26:13,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-23T08:26:13,569 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:26:13,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is 
added to blk_1073742209_1385 (size=407) 2024-11-23T08:26:13,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742209_1385 (size=407) 2024-11-23T08:26:13,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742209_1385 (size=407) 2024-11-23T08:26:13,576 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b44b16770b6526638d49bb879fe3ddcb, NAME => 'testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:13,576 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => fddfaf52409cd47bf85d7a5802de7d22, NAME => 'testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:13,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742210_1386 (size=68) 2024-11-23T08:26:13,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742210_1386 (size=68) 2024-11-23T08:26:13,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742210_1386 (size=68) 2024-11-23T08:26:13,588 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:13,589 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing b44b16770b6526638d49bb879fe3ddcb, disabling compactions & flushes 2024-11-23T08:26:13,589 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 
2024-11-23T08:26:13,589 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 2024-11-23T08:26:13,589 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. after waiting 0 ms 2024-11-23T08:26:13,589 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 2024-11-23T08:26:13,589 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 2024-11-23T08:26:13,589 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for b44b16770b6526638d49bb879fe3ddcb: Waiting for close lock at 1732350373589Disabling compacts and flushes for region at 1732350373589Disabling writes for close at 1732350373589Writing region close event to WAL at 1732350373589Closed at 1732350373589 2024-11-23T08:26:13,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742211_1387 (size=68) 2024-11-23T08:26:13,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742211_1387 (size=68) 2024-11-23T08:26:13,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742211_1387 (size=68) 2024-11-23T08:26:13,595 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:13,595 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing fddfaf52409cd47bf85d7a5802de7d22, disabling compactions & flushes 2024-11-23T08:26:13,595 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 2024-11-23T08:26:13,595 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 2024-11-23T08:26:13,595 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. after waiting 0 ms 2024-11-23T08:26:13,595 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 2024-11-23T08:26:13,595 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 
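The create request logged at 08:26:13,566 spells out the full descriptor for testtb-testExportExpiredSnapshot: a single 'cf' family with VERSIONS => '1' and otherwise default settings, and the two initialized regions ([,1) and [1,)) imply one split key of '1'. A minimal sketch of building an equivalent descriptor and issuing the create through the Admin API; the class name and connection setup are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      // One 'cf' family keeping a single version, matching the logged descriptor.
      TableDescriptorBuilder tableBuilder = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // A single split key of '1' yields the two regions seen in the log.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(tableBuilder.build(), splitKeys);
    }
  }
}
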
2024-11-23T08:26:13,595 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for fddfaf52409cd47bf85d7a5802de7d22: Waiting for close lock at 1732350373595Disabling compacts and flushes for region at 1732350373595Disabling writes for close at 1732350373595Writing region close event to WAL at 1732350373595Closed at 1732350373595 2024-11-23T08:26:13,596 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:26:13,596 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732350373596"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350373596"}]},"ts":"1732350373596"} 2024-11-23T08:26:13,596 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732350373596"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350373596"}]},"ts":"1732350373596"} 2024-11-23T08:26:13,598 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T08:26:13,599 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:26:13,599 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350373599"}]},"ts":"1732350373599"} 2024-11-23T08:26:13,601 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-23T08:26:13,601 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:26:13,602 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:26:13,602 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:26:13,602 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:26:13,602 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:26:13,602 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:26:13,602 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:26:13,602 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:26:13,602 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:26:13,602 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:26:13,602 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:26:13,602 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b44b16770b6526638d49bb879fe3ddcb, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fddfaf52409cd47bf85d7a5802de7d22, ASSIGN}] 2024-11-23T08:26:13,603 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fddfaf52409cd47bf85d7a5802de7d22, ASSIGN 2024-11-23T08:26:13,603 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b44b16770b6526638d49bb879fe3ddcb, ASSIGN 2024-11-23T08:26:13,604 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fddfaf52409cd47bf85d7a5802de7d22, ASSIGN; state=OFFLINE, location=b33d9013c60a,43417,1732350106962; forceNewPlan=false, retain=false 2024-11-23T08:26:13,604 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b44b16770b6526638d49bb879fe3ddcb, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 2024-11-23T08:26:13,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-23T08:26:13,754 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T08:26:13,755 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=b44b16770b6526638d49bb879fe3ddcb, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:26:13,755 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=fddfaf52409cd47bf85d7a5802de7d22, regionState=OPENING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:13,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b44b16770b6526638d49bb879fe3ddcb, ASSIGN because future has completed 2024-11-23T08:26:13,758 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure b44b16770b6526638d49bb879fe3ddcb, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:26:13,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fddfaf52409cd47bf85d7a5802de7d22, ASSIGN because future has completed 2024-11-23T08:26:13,759 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure fddfaf52409cd47bf85d7a5802de7d22, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:26:13,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-23T08:26:13,913 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 2024-11-23T08:26:13,913 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => b44b16770b6526638d49bb879fe3ddcb, NAME => 'testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T08:26:13,913 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. service=AccessControlService 2024-11-23T08:26:13,913 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:26:13,914 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:13,914 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:13,914 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:13,914 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:13,914 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 2024-11-23T08:26:13,914 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => fddfaf52409cd47bf85d7a5802de7d22, NAME => 'testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T08:26:13,914 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. service=AccessControlService 2024-11-23T08:26:13,915 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:26:13,915 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:13,915 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:13,915 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:13,915 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:13,915 INFO [StoreOpener-b44b16770b6526638d49bb879fe3ddcb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:13,916 INFO [StoreOpener-fddfaf52409cd47bf85d7a5802de7d22-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:13,916 INFO [StoreOpener-b44b16770b6526638d49bb879fe3ddcb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b44b16770b6526638d49bb879fe3ddcb columnFamilyName cf 2024-11-23T08:26:13,916 DEBUG [StoreOpener-b44b16770b6526638d49bb879fe3ddcb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:13,917 INFO [StoreOpener-b44b16770b6526638d49bb879fe3ddcb-1 {}] regionserver.HStore(327): Store=b44b16770b6526638d49bb879fe3ddcb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:26:13,917 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:13,917 INFO [StoreOpener-fddfaf52409cd47bf85d7a5802de7d22-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fddfaf52409cd47bf85d7a5802de7d22 columnFamilyName cf 2024-11-23T08:26:13,917 DEBUG [StoreOpener-fddfaf52409cd47bf85d7a5802de7d22-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:13,917 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:13,917 INFO [StoreOpener-fddfaf52409cd47bf85d7a5802de7d22-1 {}] regionserver.HStore(327): Store=fddfaf52409cd47bf85d7a5802de7d22/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:26:13,918 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:13,918 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:13,918 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:13,918 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:13,918 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:13,918 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:13,919 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:13,919 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:13,919 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 
{event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:13,920 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:13,921 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:26:13,922 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:26:13,922 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened b44b16770b6526638d49bb879fe3ddcb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67225867, jitterRate=0.0017434805631637573}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:26:13,922 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened fddfaf52409cd47bf85d7a5802de7d22; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74284467, jitterRate=0.10692481696605682}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:26:13,922 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:13,922 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:13,923 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for fddfaf52409cd47bf85d7a5802de7d22: Running coprocessor pre-open hook at 1732350373915Writing region info on filesystem at 1732350373915Initializing all the Stores at 1732350373916 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350373916Cleaning up temporary data from old regions at 1732350373919 (+3 ms)Running coprocessor post-open hooks at 1732350373922 (+3 ms)Region opened successfully at 1732350373923 (+1 ms) 2024-11-23T08:26:13,923 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for b44b16770b6526638d49bb879fe3ddcb: Running coprocessor pre-open hook at 1732350373914Writing region info on filesystem at 
1732350373914Initializing all the Stores at 1732350373915 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350373915Cleaning up temporary data from old regions at 1732350373918 (+3 ms)Running coprocessor post-open hooks at 1732350373922 (+4 ms)Region opened successfully at 1732350373923 (+1 ms) 2024-11-23T08:26:13,923 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb., pid=172, masterSystemTime=1732350373910 2024-11-23T08:26:13,923 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22., pid=173, masterSystemTime=1732350373912 2024-11-23T08:26:13,925 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 2024-11-23T08:26:13,925 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 2024-11-23T08:26:13,925 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=fddfaf52409cd47bf85d7a5802de7d22, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:13,926 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 2024-11-23T08:26:13,926 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 
2024-11-23T08:26:13,927 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=b44b16770b6526638d49bb879fe3ddcb, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:26:13,927 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-23T08:26:13,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure fddfaf52409cd47bf85d7a5802de7d22, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:26:13,929 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure b44b16770b6526638d49bb879fe3ddcb, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:26:13,930 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=171 2024-11-23T08:26:13,930 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure fddfaf52409cd47bf85d7a5802de7d22, server=b33d9013c60a,43417,1732350106962 in 170 msec 2024-11-23T08:26:13,931 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=170 2024-11-23T08:26:13,931 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fddfaf52409cd47bf85d7a5802de7d22, ASSIGN in 328 msec 2024-11-23T08:26:13,931 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure b44b16770b6526638d49bb879fe3ddcb, server=b33d9013c60a,39649,1732350107049 in 172 msec 2024-11-23T08:26:13,933 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-11-23T08:26:13,933 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b44b16770b6526638d49bb879fe3ddcb, ASSIGN in 329 msec 2024-11-23T08:26:13,933 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:26:13,933 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350373933"}]},"ts":"1732350373933"} 2024-11-23T08:26:13,935 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-23T08:26:13,935 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:26:13,936 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 
2024-11-23T08:26:13,938 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T08:26:14,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:14,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:14,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:14,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:14,013 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:14,014 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:14,015 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:14,015 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:14,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 448 msec 2024-11-23T08:26:14,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-23T08:26:14,197 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T08:26:14,198 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-23T08:26:14,198 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:26:14,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 
2024-11-23T08:26:14,201 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:26:14,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-11-23T08:26:14,202 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T08:26:14,204 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-23T08:26:14,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350374205 (current time:1732350374205). 2024-11-23T08:26:14,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:26:14,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-23T08:26:14,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:26:14,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4968c169, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:14,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:14,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:14,206 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:14,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:14,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:14,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26fd3fda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:14,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:14,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:14,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:14,208 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60324, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:14,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a7bfbf0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:14,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:14,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:14,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:14,210 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60106, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:14,211 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 
2024-11-23T08:26:14,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:14,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:14,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:14,211 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:26:14,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37a4feae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:14,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:14,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:14,213 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:14,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:14,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:14,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b401907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:14,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:14,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:14,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:14,214 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60330, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:14,215 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bebd2f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:14,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:14,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:14,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:14,217 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:14,218 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:14,218 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:14,219 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39670, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:14,220 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 
2024-11-23T08:26:14,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:14,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:14,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:14,220 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:26:14,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T08:26:14,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T08:26:14,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-23T08:26:14,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-11-23T08:26:14,223 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:26:14,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-23T08:26:14,224 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:26:14,225 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:26:14,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742212_1388 (size=170) 2024-11-23T08:26:14,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742212_1388 (size=170) 2024-11-23T08:26:14,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742212_1388 (size=170) 2024-11-23T08:26:14,232 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:26:14,232 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b44b16770b6526638d49bb879fe3ddcb}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fddfaf52409cd47bf85d7a5802de7d22}] 2024-11-23T08:26:14,233 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:14,233 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:14,327 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-23T08:26:14,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-11-23T08:26:14,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-11-23T08:26:14,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 2024-11-23T08:26:14,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 2024-11-23T08:26:14,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for fddfaf52409cd47bf85d7a5802de7d22: 2024-11-23T08:26:14,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for b44b16770b6526638d49bb879fe3ddcb: 2024-11-23T08:26:14,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-23T08:26:14,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-23T08:26:14,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-23T08:26:14,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-23T08:26:14,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:14,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:14,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:26:14,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:26:14,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742213_1389 (size=71) 2024-11-23T08:26:14,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742213_1389 (size=71) 2024-11-23T08:26:14,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742214_1390 (size=71) 2024-11-23T08:26:14,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 2024-11-23T08:26:14,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-23T08:26:14,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742214_1390 (size=71) 2024-11-23T08:26:14,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742213_1389 (size=71) 2024-11-23T08:26:14,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742214_1390 (size=71) 2024-11-23T08:26:14,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-11-23T08:26:14,392 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:14,392 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:14,394 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fddfaf52409cd47bf85d7a5802de7d22 in 161 msec 2024-11-23T08:26:14,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-23T08:26:14,708 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 
3.3.4, See HBASE-27595 for details. 2024-11-23T08:26:14,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 2024-11-23T08:26:14,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-11-23T08:26:14,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-11-23T08:26:14,793 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:14,793 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:14,796 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=175, resume processing ppid=174 2024-11-23T08:26:14,796 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:26:14,796 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b44b16770b6526638d49bb879fe3ddcb in 562 msec 2024-11-23T08:26:14,796 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:26:14,797 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:26:14,797 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-11-23T08:26:14,797 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-11-23T08:26:14,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742215_1391 (size=552) 2024-11-23T08:26:14,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742215_1391 (size=552) 2024-11-23T08:26:14,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742215_1391 (size=552) 2024-11-23T08:26:14,808 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:26:14,812 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:26:14,812 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-11-23T08:26:14,814 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:26:14,814 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-11-23T08:26:14,815 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 593 msec 2024-11-23T08:26:14,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-23T08:26:14,847 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T08:26:14,850 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='050b384513a329aec100be0a4a8ee0e44', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:14,851 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='1f3e0e44f9e40b44d7966bf96173bf28f', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:26:14,852 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='227b8a3fce1e78cc086eb3cf5321f6121', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:26:14,852 DEBUG 
[RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='36e62acc9e4ab6c176139823026a38164', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:26:14,853 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='4bb5853d1f2f9d45754090561283abb66', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:26:14,857 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39649 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:26:14,859 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43417 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:26:14,861 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T08:26:14,863 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-23T08:26:14,863 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 2024-11-23T08:26:14,863 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:26:14,865 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T08:26:14,869 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T08:26:14,874 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T08:26:14,876 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-23T08:26:14,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350374876 (current time:1732350374876). 
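
The snapshot request logged above ({ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }) is the server-side view of an Admin API call. Purely as a hedged illustration (this is not the test harness code; connection settings are assumed), an equivalent client-side request could look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // type=FLUSH: online regions are flushed so the snapshot references current store files
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH));
    }
  }
}
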
2024-11-23T08:26:14,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:26:14,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-23T08:26:14,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:26:14,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@679f6405, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:14,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:14,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:14,878 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:14,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:14,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:14,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6783de1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:14,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:14,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:14,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:14,879 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60350, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:14,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73662ed9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:14,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:14,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:14,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:14,881 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60116, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:14,882 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 2024-11-23T08:26:14,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:14,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:14,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:14,882 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T08:26:14,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78dbc134, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:14,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:14,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:14,883 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:14,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:14,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:14,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fe828ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:14,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:14,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:14,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:14,884 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60370, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:14,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42e2e83d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:14,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:14,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:14,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:14,887 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60126, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
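
The ClusterIdFetcher / ConnectionRegistryService / meta-location entries above are the bootstrap a client connection performs internally: fetch the cluster id, locate hbase:meta, then open ClientService connections to the region servers. In this test the registry is backed by the in-process mini-cluster master; for a standalone client the same bootstrap is normally driven by configuration such as the ZooKeeper quorum. A minimal sketch, with a placeholder quorum that does not come from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder quorum; the test above runs against an in-process mini cluster instead.
    conf.set("hbase.zookeeper.quorum", "zk1.example.com,zk2.example.com,zk3.example.com");
    // createConnection() triggers the bootstrap seen in the log: cluster id lookup,
    // hbase:meta location fetch, then per-regionserver ClientService connections.
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      System.out.println("connection ready: " + !conn.isClosed());
    }
  }
}
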
2024-11-23T08:26:14,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:14,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:14,889 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39672, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:14,890 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 2024-11-23T08:26:14,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:14,890 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:14,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:14,890 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:26:14,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T08:26:14,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:26:14,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-23T08:26:14,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-11-23T08:26:14,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-23T08:26:14,892 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:26:14,893 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:26:14,895 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:26:14,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742216_1392 (size=165) 2024-11-23T08:26:14,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742216_1392 (size=165) 2024-11-23T08:26:14,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742216_1392 (size=165) 2024-11-23T08:26:14,901 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:26:14,901 INFO 
[PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b44b16770b6526638d49bb879fe3ddcb}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fddfaf52409cd47bf85d7a5802de7d22}] 2024-11-23T08:26:14,902 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:14,902 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:14,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-23T08:26:15,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-11-23T08:26:15,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-11-23T08:26:15,053 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 2024-11-23T08:26:15,053 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 
2024-11-23T08:26:15,054 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing b44b16770b6526638d49bb879fe3ddcb 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-23T08:26:15,054 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing fddfaf52409cd47bf85d7a5802de7d22 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-23T08:26:15,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/.tmp/cf/311013e515da41c788db445a6aa1765b is 71, key is 023a68dfd5e1ef1dabc5cb9de2a14e8b/cf:q/1732350374856/Put/seqid=0 2024-11-23T08:26:15,073 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/.tmp/cf/3703de055a744a39b1b8f35e2345a955 is 71, key is 15abbfa9a94e17f2f686e8aaaf2e7c15/cf:q/1732350374859/Put/seqid=0 2024-11-23T08:26:15,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742217_1393 (size=5286) 2024-11-23T08:26:15,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742217_1393 (size=5286) 2024-11-23T08:26:15,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742218_1394 (size=8324) 2024-11-23T08:26:15,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742218_1394 (size=8324) 2024-11-23T08:26:15,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742217_1393 (size=5286) 2024-11-23T08:26:15,079 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/.tmp/cf/311013e515da41c788db445a6aa1765b 2024-11-23T08:26:15,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742218_1394 (size=8324) 2024-11-23T08:26:15,079 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/.tmp/cf/3703de055a744a39b1b8f35e2345a955 2024-11-23T08:26:15,083 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/.tmp/cf/311013e515da41c788db445a6aa1765b as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/cf/311013e515da41c788db445a6aa1765b 2024-11-23T08:26:15,083 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/.tmp/cf/3703de055a744a39b1b8f35e2345a955 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/cf/3703de055a744a39b1b8f35e2345a955 2024-11-23T08:26:15,087 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/cf/311013e515da41c788db445a6aa1765b, entries=3, sequenceid=6, filesize=5.2 K 2024-11-23T08:26:15,087 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/cf/3703de055a744a39b1b8f35e2345a955, entries=47, sequenceid=6, filesize=8.1 K 2024-11-23T08:26:15,088 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for b44b16770b6526638d49bb879fe3ddcb in 34ms, sequenceid=6, compaction requested=false 2024-11-23T08:26:15,088 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for fddfaf52409cd47bf85d7a5802de7d22 in 34ms, sequenceid=6, compaction requested=false 2024-11-23T08:26:15,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for b44b16770b6526638d49bb879fe3ddcb: 2024-11-23T08:26:15,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for fddfaf52409cd47bf85d7a5802de7d22: 2024-11-23T08:26:15,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. for snaptb0-testExportExpiredSnapshot completed. 2024-11-23T08:26:15,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. for snaptb0-testExportExpiredSnapshot completed. 
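
The two "Finished flush" entries above show each SnapshotRegionCallable flushing its region's single 'cf' memstore to a new store file under .tmp and committing it before the file is referenced in the snapshot. The procedure does this itself for type=FLUSH snapshots; purely as a hedged illustration, the same flush can also be requested explicitly from a client:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush every region of the table so memstore contents land in store files,
      // analogous to the per-region flushes performed by pid=178/179 above.
      admin.flush(TableName.valueOf("testtb-testExportExpiredSnapshot"));
    }
  }
}
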
2024-11-23T08:26:15,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-23T08:26:15,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-23T08:26:15,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:15,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:15,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/cf/3703de055a744a39b1b8f35e2345a955] hfiles 2024-11-23T08:26:15,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/cf/311013e515da41c788db445a6aa1765b] hfiles 2024-11-23T08:26:15,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/cf/3703de055a744a39b1b8f35e2345a955 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-23T08:26:15,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/cf/311013e515da41c788db445a6aa1765b for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-23T08:26:15,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742219_1395 (size=110) 2024-11-23T08:26:15,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742219_1395 (size=110) 2024-11-23T08:26:15,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742219_1395 (size=110) 2024-11-23T08:26:15,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 
2024-11-23T08:26:15,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-11-23T08:26:15,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-11-23T08:26:15,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:15,105 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:15,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fddfaf52409cd47bf85d7a5802de7d22 in 204 msec 2024-11-23T08:26:15,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742220_1396 (size=110) 2024-11-23T08:26:15,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742220_1396 (size=110) 2024-11-23T08:26:15,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742220_1396 (size=110) 2024-11-23T08:26:15,109 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 
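
Both region subprocedures (pid=178 and pid=179) have now reported back; the consolidation, verification and completion states for pid=177 follow in the entries below, after which the snapshot becomes visible to clients. A hedged sketch of confirming that from the client side:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Iterate over all completed snapshots and look for the one taken above.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        if (sd.getName().equals("snaptb0-testExportExpiredSnapshot")) {
          System.out.println("found snapshot: " + sd.getName()
              + " table=" + sd.getTableNameAsString());
        }
      }
    }
  }
}
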
2024-11-23T08:26:15,109 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-23T08:26:15,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-11-23T08:26:15,109 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:15,109 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:15,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=177 2024-11-23T08:26:15,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b44b16770b6526638d49bb879fe3ddcb in 209 msec 2024-11-23T08:26:15,111 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:26:15,112 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:26:15,112 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:26:15,112 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-11-23T08:26:15,113 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-11-23T08:26:15,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742221_1397 (size=630) 2024-11-23T08:26:15,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742221_1397 (size=630) 2024-11-23T08:26:15,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742221_1397 (size=630) 2024-11-23T08:26:15,128 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:26:15,132 INFO 
[PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:26:15,133 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-11-23T08:26:15,134 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:26:15,134 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-11-23T08:26:15,135 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 243 msec 2024-11-23T08:26:15,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-23T08:26:15,207 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T08:26:15,208 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:26:15,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-11-23T08:26:15,210 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:26:15,210 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:15,210 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-11-23T08:26:15,211 INFO [PEWorker-1 
{}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:26:15,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-23T08:26:15,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742222_1398 (size=400) 2024-11-23T08:26:15,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742222_1398 (size=400) 2024-11-23T08:26:15,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742222_1398 (size=400) 2024-11-23T08:26:15,219 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 31a9b15322d53a7b44d2ddb2b2ab3b79, NAME => 'testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:15,220 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => d68b63a66937da30826ba80d223788e3, NAME => 'testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:15,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742224_1400 (size=61) 2024-11-23T08:26:15,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742223_1399 (size=61) 2024-11-23T08:26:15,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742224_1400 (size=61) 2024-11-23T08:26:15,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742223_1399 (size=61) 2024-11-23T08:26:15,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742224_1400 (size=61) 2024-11-23T08:26:15,226 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742223_1399 (size=61) 2024-11-23T08:26:15,227 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:15,227 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:15,227 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 31a9b15322d53a7b44d2ddb2b2ab3b79, disabling compactions & flushes 2024-11-23T08:26:15,227 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing d68b63a66937da30826ba80d223788e3, disabling compactions & flushes 2024-11-23T08:26:15,227 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:15,227 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 2024-11-23T08:26:15,227 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:15,227 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 2024-11-23T08:26:15,227 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. after waiting 0 ms 2024-11-23T08:26:15,227 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. after waiting 0 ms 2024-11-23T08:26:15,227 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:15,227 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 2024-11-23T08:26:15,227 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:15,227 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 
2024-11-23T08:26:15,227 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for d68b63a66937da30826ba80d223788e3: Waiting for close lock at 1732350375227Disabling compacts and flushes for region at 1732350375227Disabling writes for close at 1732350375227Writing region close event to WAL at 1732350375227Closed at 1732350375227 2024-11-23T08:26:15,227 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 31a9b15322d53a7b44d2ddb2b2ab3b79: Waiting for close lock at 1732350375227Disabling compacts and flushes for region at 1732350375227Disabling writes for close at 1732350375227Writing region close event to WAL at 1732350375227Closed at 1732350375227 2024-11-23T08:26:15,228 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:26:15,228 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732350375228"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350375228"}]},"ts":"1732350375228"} 2024-11-23T08:26:15,228 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732350375228"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350375228"}]},"ts":"1732350375228"} 2024-11-23T08:26:15,232 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
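
The create request logged at 08:26:15,208 (table 'testExportExpiredSnapshot', a single 'cf' family with VERSIONS => '1', pre-split into two regions at key '1') corresponds to an Admin.createTable call with a table descriptor and one split key. A hedged sketch only; builder defaults stand in for the full attribute list printed in the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
              .build())
          .build();
      // One split key '1' yields the two regions seen above: ['', '1') and ['1', '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
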
2024-11-23T08:26:15,233 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:26:15,233 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350375233"}]},"ts":"1732350375233"} 2024-11-23T08:26:15,234 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-23T08:26:15,234 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:26:15,235 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:26:15,235 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:26:15,235 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:26:15,235 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:26:15,235 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:26:15,235 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:26:15,235 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:26:15,235 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:26:15,235 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:26:15,235 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:26:15,236 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=31a9b15322d53a7b44d2ddb2b2ab3b79, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d68b63a66937da30826ba80d223788e3, ASSIGN}] 2024-11-23T08:26:15,237 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=31a9b15322d53a7b44d2ddb2b2ab3b79, ASSIGN 2024-11-23T08:26:15,237 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d68b63a66937da30826ba80d223788e3, ASSIGN 2024-11-23T08:26:15,237 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d68b63a66937da30826ba80d223788e3, ASSIGN; state=OFFLINE, location=b33d9013c60a,45199,1732350106787; forceNewPlan=false, retain=false 2024-11-23T08:26:15,237 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=31a9b15322d53a7b44d2ddb2b2ab3b79, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 2024-11-23T08:26:15,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-23T08:26:15,388 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-23T08:26:15,388 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=d68b63a66937da30826ba80d223788e3, regionState=OPENING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:26:15,388 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=31a9b15322d53a7b44d2ddb2b2ab3b79, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:26:15,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=31a9b15322d53a7b44d2ddb2b2ab3b79, ASSIGN because future has completed 2024-11-23T08:26:15,390 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31a9b15322d53a7b44d2ddb2b2ab3b79, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:26:15,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d68b63a66937da30826ba80d223788e3, ASSIGN because future has completed 2024-11-23T08:26:15,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure d68b63a66937da30826ba80d223788e3, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:26:15,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-23T08:26:15,545 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:15,545 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => 31a9b15322d53a7b44d2ddb2b2ab3b79, NAME => 'testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T08:26:15,545 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. service=AccessControlService 2024-11-23T08:26:15,545 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:26:15,545 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:15,545 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:15,546 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:15,546 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:15,546 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 2024-11-23T08:26:15,546 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => d68b63a66937da30826ba80d223788e3, NAME => 'testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T08:26:15,546 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. service=AccessControlService 2024-11-23T08:26:15,547 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:26:15,547 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:15,547 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:15,547 INFO [StoreOpener-31a9b15322d53a7b44d2ddb2b2ab3b79-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:15,547 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:15,547 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:15,548 INFO [StoreOpener-d68b63a66937da30826ba80d223788e3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:15,548 INFO [StoreOpener-31a9b15322d53a7b44d2ddb2b2ab3b79-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 31a9b15322d53a7b44d2ddb2b2ab3b79 columnFamilyName cf 2024-11-23T08:26:15,548 DEBUG [StoreOpener-31a9b15322d53a7b44d2ddb2b2ab3b79-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:15,549 INFO [StoreOpener-31a9b15322d53a7b44d2ddb2b2ab3b79-1 {}] regionserver.HStore(327): Store=31a9b15322d53a7b44d2ddb2b2ab3b79/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:26:15,549 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:15,549 INFO [StoreOpener-d68b63a66937da30826ba80d223788e3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d68b63a66937da30826ba80d223788e3 columnFamilyName cf 2024-11-23T08:26:15,549 DEBUG [StoreOpener-d68b63a66937da30826ba80d223788e3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:15,549 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:15,549 INFO [StoreOpener-d68b63a66937da30826ba80d223788e3-1 {}] regionserver.HStore(327): Store=d68b63a66937da30826ba80d223788e3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:26:15,550 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:15,550 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:15,550 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:15,550 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:15,550 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:15,550 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:15,551 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:15,551 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:15,551 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] 
regionserver.HRegion(1093): writing seq id for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:15,552 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:15,553 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:26:15,553 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened 31a9b15322d53a7b44d2ddb2b2ab3b79; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60661517, jitterRate=-0.0960729569196701}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:26:15,553 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:15,554 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/d68b63a66937da30826ba80d223788e3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:26:15,554 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for 31a9b15322d53a7b44d2ddb2b2ab3b79: Running coprocessor pre-open hook at 1732350375546Writing region info on filesystem at 1732350375546Initializing all the Stores at 1732350375546Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350375546Cleaning up temporary data from old regions at 1732350375550 (+4 ms)Running coprocessor post-open hooks at 1732350375553 (+3 ms)Region opened successfully at 1732350375554 (+1 ms) 2024-11-23T08:26:15,554 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened d68b63a66937da30826ba80d223788e3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59905373, jitterRate=-0.10734038054943085}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:26:15,554 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:15,554 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for d68b63a66937da30826ba80d223788e3: Running coprocessor pre-open hook at 1732350375547Writing region info on filesystem at 1732350375547Initializing all the Stores at 1732350375548 (+1 ms)Instantiating store 
for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350375548Cleaning up temporary data from old regions at 1732350375551 (+3 ms)Running coprocessor post-open hooks at 1732350375554 (+3 ms)Region opened successfully at 1732350375554 2024-11-23T08:26:15,566 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79., pid=183, masterSystemTime=1732350375541 2024-11-23T08:26:15,566 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3., pid=184, masterSystemTime=1732350375542 2024-11-23T08:26:15,567 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 2024-11-23T08:26:15,568 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 2024-11-23T08:26:15,568 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=d68b63a66937da30826ba80d223788e3, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:26:15,568 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:15,568 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 
2024-11-23T08:26:15,569 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=31a9b15322d53a7b44d2ddb2b2ab3b79, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:26:15,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure d68b63a66937da30826ba80d223788e3, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:26:15,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31a9b15322d53a7b44d2ddb2b2ab3b79, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:26:15,573 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=182 2024-11-23T08:26:15,573 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure d68b63a66937da30826ba80d223788e3, server=b33d9013c60a,45199,1732350106787 in 180 msec 2024-11-23T08:26:15,574 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d68b63a66937da30826ba80d223788e3, ASSIGN in 337 msec 2024-11-23T08:26:15,574 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=181 2024-11-23T08:26:15,574 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure 31a9b15322d53a7b44d2ddb2b2ab3b79, server=b33d9013c60a,39649,1732350107049 in 182 msec 2024-11-23T08:26:15,576 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-11-23T08:26:15,576 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=31a9b15322d53a7b44d2ddb2b2ab3b79, ASSIGN in 338 msec 2024-11-23T08:26:15,577 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:26:15,577 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350375577"}]},"ts":"1732350375577"} 2024-11-23T08:26:15,579 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-23T08:26:15,580 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:26:15,580 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-11-23T08:26:15,584 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T08:26:15,638 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:15,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:15,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:15,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:15,678 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:15,678 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:15,678 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:15,679 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:15,679 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:15,679 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:15,679 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:15,679 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:15,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 470 msec 2024-11-23T08:26:15,837 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-23T08:26:15,837 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-11-23T08:26:15,837 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-23T08:26:15,837 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:26:15,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-11-23T08:26:15,841 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:26:15,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportExpiredSnapshot assigned. 2024-11-23T08:26:15,841 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T08:26:15,846 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='0ab142c70245596307572ee3eac7e72b2', locateType=CURRENT is [region=testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:15,846 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='1abe0baab6091b14b5e2a9a4a651e1b89', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:26:15,847 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='21f8c67db23f7b286637e01b1ebdede44', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:26:15,848 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='35d3da2daf23257516dd7eea1ce6d6912', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:26:15,848 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='45722cf652a7521df780f9e8703764860', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3., hostname=b33d9013c60a,45199,1732350106787, seqNum=2] 2024-11-23T08:26:15,851 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39649 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. with WAL disabled. Data may be lost in the event of a crash. 
2024-11-23T08:26:15,854 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45199 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:26:15,855 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T08:26:15,858 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-23T08:26:15,858 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:15,858 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:26:15,860 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T08:26:15,864 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T08:26:15,869 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-23T08:26:15,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-23T08:26:15,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:26:15,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4969c9ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:15,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:15,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:15,871 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:15,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:15,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:15,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47292114, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:15,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:15,872 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:15,872 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:15,872 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60386, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:15,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3788f82e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:15,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:15,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:15,874 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:15,875 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60134, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:15,876 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 
2024-11-23T08:26:15,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:15,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:15,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:15,876 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:26:15,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7292b52b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:15,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:15,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:15,877 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:15,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:15,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:15,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4849bfcc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:15,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:15,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:15,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:15,879 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60412, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:15,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f41d7d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:15,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:15,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:15,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:15,881 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60150, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:15,882 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:15,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:15,884 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39682, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:15,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 
2024-11-23T08:26:15,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:15,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:15,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:15,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T08:26:15,885 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:26:15,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T08:26:15,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-23T08:26:15,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-11-23T08:26:15,887 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:26:15,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-11-23T08:26:15,888 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:26:15,890 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:26:15,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742225_1401 (size=152) 2024-11-23T08:26:15,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742225_1401 (size=152) 2024-11-23T08:26:15,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742225_1401 (size=152) 2024-11-23T08:26:15,896 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:26:15,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 31a9b15322d53a7b44d2ddb2b2ab3b79}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d68b63a66937da30826ba80d223788e3}] 2024-11-23T08:26:15,897 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:15,897 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:15,997 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-11-23T08:26:16,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-11-23T08:26:16,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45199 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-11-23T08:26:16,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 2024-11-23T08:26:16,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:16,049 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing 31a9b15322d53a7b44d2ddb2b2ab3b79 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-23T08:26:16,049 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing d68b63a66937da30826ba80d223788e3 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-23T08:26:16,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79/.tmp/cf/49825aae06544bbebb0c64a7bd3ab9f5 is 71, key is 038736f8b716b56ff265ba79abff1c8d/cf:q/1732350375851/Put/seqid=0 2024-11-23T08:26:16,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742226_1402 (size=5216) 2024-11-23T08:26:16,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742226_1402 (size=5216) 2024-11-23T08:26:16,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742226_1402 (size=5216) 2024-11-23T08:26:16,067 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79/.tmp/cf/49825aae06544bbebb0c64a7bd3ab9f5 2024-11-23T08:26:16,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/d68b63a66937da30826ba80d223788e3/.tmp/cf/d2eadcc8280744b193f2da09d4955c6c is 71, key is 10bc2bc87ba3c7e90ef5d05d84fce71a/cf:q/1732350375854/Put/seqid=0 2024-11-23T08:26:16,072 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79/.tmp/cf/49825aae06544bbebb0c64a7bd3ab9f5 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79/cf/49825aae06544bbebb0c64a7bd3ab9f5 2024-11-23T08:26:16,077 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79/cf/49825aae06544bbebb0c64a7bd3ab9f5, entries=2, sequenceid=5, filesize=5.1 K 2024-11-23T08:26:16,078 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 31a9b15322d53a7b44d2ddb2b2ab3b79 in 30ms, sequenceid=5, compaction requested=false 2024-11-23T08:26:16,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-23T08:26:16,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for 31a9b15322d53a7b44d2ddb2b2ab3b79: 2024-11-23T08:26:16,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. for snapshot-testExportExpiredSnapshot completed. 2024-11-23T08:26:16,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-23T08:26:16,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:16,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79/cf/49825aae06544bbebb0c64a7bd3ab9f5] hfiles 2024-11-23T08:26:16,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79/cf/49825aae06544bbebb0c64a7bd3ab9f5 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-23T08:26:16,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742227_1403 (size=8392) 2024-11-23T08:26:16,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742227_1403 (size=8392) 2024-11-23T08:26:16,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742227_1403 (size=8392) 2024-11-23T08:26:16,082 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/d68b63a66937da30826ba80d223788e3/.tmp/cf/d2eadcc8280744b193f2da09d4955c6c 2024-11-23T08:26:16,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742228_1404 (size=103) 2024-11-23T08:26:16,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742228_1404 (size=103) 2024-11-23T08:26:16,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742228_1404 (size=103) 2024-11-23T08:26:16,085 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 
2024-11-23T08:26:16,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-11-23T08:26:16,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-11-23T08:26:16,086 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:16,086 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:16,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/d68b63a66937da30826ba80d223788e3/.tmp/cf/d2eadcc8280744b193f2da09d4955c6c as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/d68b63a66937da30826ba80d223788e3/cf/d2eadcc8280744b193f2da09d4955c6c 2024-11-23T08:26:16,088 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 31a9b15322d53a7b44d2ddb2b2ab3b79 in 191 msec 2024-11-23T08:26:16,091 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/d68b63a66937da30826ba80d223788e3/cf/d2eadcc8280744b193f2da09d4955c6c, entries=48, sequenceid=5, filesize=8.2 K 2024-11-23T08:26:16,092 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for d68b63a66937da30826ba80d223788e3 in 43ms, sequenceid=5, compaction requested=false 2024-11-23T08:26:16,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for d68b63a66937da30826ba80d223788e3: 2024-11-23T08:26:16,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. for snapshot-testExportExpiredSnapshot completed. 2024-11-23T08:26:16,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-23T08:26:16,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:16,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/d68b63a66937da30826ba80d223788e3/cf/d2eadcc8280744b193f2da09d4955c6c] hfiles 2024-11-23T08:26:16,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/d68b63a66937da30826ba80d223788e3/cf/d2eadcc8280744b193f2da09d4955c6c for snapshot=snapshot-testExportExpiredSnapshot 2024-11-23T08:26:16,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742229_1405 (size=103) 2024-11-23T08:26:16,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742229_1405 (size=103) 2024-11-23T08:26:16,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742229_1405 (size=103) 2024-11-23T08:26:16,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 
2024-11-23T08:26:16,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-11-23T08:26:16,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-11-23T08:26:16,098 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:16,098 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d68b63a66937da30826ba80d223788e3 2024-11-23T08:26:16,101 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=185 2024-11-23T08:26:16,101 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d68b63a66937da30826ba80d223788e3 in 203 msec 2024-11-23T08:26:16,101 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:26:16,101 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:26:16,102 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:26:16,102 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-11-23T08:26:16,102 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-23T08:26:16,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742230_1406 (size=609) 2024-11-23T08:26:16,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742230_1406 (size=609) 2024-11-23T08:26:16,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742230_1406 (size=609) 2024-11-23T08:26:16,114 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:26:16,118 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:26:16,118 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-11-23T08:26:16,119 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:26:16,119 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-11-23T08:26:16,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 234 msec 2024-11-23T08:26:16,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-11-23T08:26:16,207 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-11-23T08:26:16,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-23T08:26:16,247 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-23T08:26:16,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-23T08:26:16,248 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-23T08:26:16,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T08:26:16,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T08:26:17,254 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0007_000001 (auth:SIMPLE) from 127.0.0.1:41004 
2024-11-23T08:26:17,265 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0007/container_1732350113698_0007_01_000001/launch_container.sh] 2024-11-23T08:26:17,265 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0007/container_1732350113698_0007_01_000001/container_tokens] 2024-11-23T08:26:17,265 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0007/container_1732350113698_0007_01_000001/sysfs] 2024-11-23T08:26:18,500 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:26:26,214 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350386214 2024-11-23T08:26:26,215 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34105, tgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350386214, rawTgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350386214, srcFsUri=hdfs://localhost:34105, srcDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:26,242 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34105, inputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:26,242 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350386214, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350386214/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-23T08:26:26,244 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T08:26:26,246 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?] 
at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
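The SnapshotTTLExpiredException above is the expected outcome of this test case: the snapshot was created with ttl=10 around 08:26:16 and the export verification ran at 08:26:26, roughly ten seconds later, so the "Verify the source snapshot's expiration status" step in ExportSnapshot.doWork rejects it before any data is copied. As a minimal sketch of the kind of check involved (illustrative only; the class and method below are invented for the example, they are not the actual HBase helper, and the sketch assumes the TTL is expressed in seconds), the test reduces to comparing creation time plus TTL against the current clock:

// Illustrative sketch only, not HBase code: a snapshot with a positive TTL is
// treated as expired once creationTime + ttl seconds has elapsed. With ttl=10
// and an export attempted ~10s after creation, the check fails as logged above.
final class SnapshotTtlCheckSketch {
  static boolean isExpired(long ttlSeconds, long creationTimeMillis, long nowMillis) {
    if (ttlSeconds <= 0) {
      return false; // a non-positive TTL is taken to mean "never expires"
    }
    return nowMillis > creationTimeMillis + ttlSeconds * 1000L;
  }

  public static void main(String[] args) {
    long created = System.currentTimeMillis() - 11_000L; // created ~11 seconds ago
    System.out.println(isExpired(10L, created, System.currentTimeMillis())); // prints: true
  }
}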
2024-11-23T08:26:26,247 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-23T08:26:26,251 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350386251"}]},"ts":"1732350386251"} 2024-11-23T08:26:26,253 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-11-23T08:26:26,253 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-11-23T08:26:26,254 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-11-23T08:26:26,255 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b44b16770b6526638d49bb879fe3ddcb, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fddfaf52409cd47bf85d7a5802de7d22, UNASSIGN}] 2024-11-23T08:26:26,256 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b44b16770b6526638d49bb879fe3ddcb, UNASSIGN 2024-11-23T08:26:26,257 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fddfaf52409cd47bf85d7a5802de7d22, UNASSIGN 2024-11-23T08:26:26,257 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=fddfaf52409cd47bf85d7a5802de7d22, regionState=CLOSING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:26,257 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=b44b16770b6526638d49bb879fe3ddcb, regionState=CLOSING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:26:26,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fddfaf52409cd47bf85d7a5802de7d22, UNASSIGN because future has completed 2024-11-23T08:26:26,259 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:26:26,259 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure fddfaf52409cd47bf85d7a5802de7d22, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:26:26,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b44b16770b6526638d49bb879fe3ddcb, UNASSIGN because future has completed 2024-11-23T08:26:26,260 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:26:26,260 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure b44b16770b6526638d49bb879fe3ddcb, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:26:26,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-23T08:26:26,412 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:26,412 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:26:26,413 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing fddfaf52409cd47bf85d7a5802de7d22, disabling compactions & flushes 2024-11-23T08:26:26,413 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 2024-11-23T08:26:26,413 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 2024-11-23T08:26:26,413 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. after waiting 0 ms 2024-11-23T08:26:26,413 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 
2024-11-23T08:26:26,413 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:26,413 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:26:26,413 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing b44b16770b6526638d49bb879fe3ddcb, disabling compactions & flushes 2024-11-23T08:26:26,413 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 2024-11-23T08:26:26,413 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 2024-11-23T08:26:26,413 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. after waiting 0 ms 2024-11-23T08:26:26,413 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 2024-11-23T08:26:26,416 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:26:26,417 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:26:26,417 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:26:26,417 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb. 
2024-11-23T08:26:26,417 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:26:26,417 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for b44b16770b6526638d49bb879fe3ddcb: Waiting for close lock at 1732350386413Running coprocessor pre-close hooks at 1732350386413Disabling compacts and flushes for region at 1732350386413Disabling writes for close at 1732350386413Writing region close event to WAL at 1732350386414 (+1 ms)Running coprocessor post-close hooks at 1732350386417 (+3 ms)Closed at 1732350386417 2024-11-23T08:26:26,417 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22. 2024-11-23T08:26:26,417 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for fddfaf52409cd47bf85d7a5802de7d22: Waiting for close lock at 1732350386412Running coprocessor pre-close hooks at 1732350386412Disabling compacts and flushes for region at 1732350386412Disabling writes for close at 1732350386413 (+1 ms)Writing region close event to WAL at 1732350386413Running coprocessor post-close hooks at 1732350386417 (+4 ms)Closed at 1732350386417 2024-11-23T08:26:26,419 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:26,420 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=b44b16770b6526638d49bb879fe3ddcb, regionState=CLOSED 2024-11-23T08:26:26,420 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=fddfaf52409cd47bf85d7a5802de7d22, regionState=CLOSED 2024-11-23T08:26:26,420 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:26,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure b44b16770b6526638d49bb879fe3ddcb, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:26:26,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure fddfaf52409cd47bf85d7a5802de7d22, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:26:26,424 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=190 2024-11-23T08:26:26,424 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=190, state=SUCCESS, hasLock=false; CloseRegionProcedure b44b16770b6526638d49bb879fe3ddcb, server=b33d9013c60a,39649,1732350107049 in 162 msec 2024-11-23T08:26:26,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=191 2024-11-23T08:26:26,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=191, state=SUCCESS, 
hasLock=false; CloseRegionProcedure fddfaf52409cd47bf85d7a5802de7d22, server=b33d9013c60a,43417,1732350106962 in 164 msec 2024-11-23T08:26:26,425 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b44b16770b6526638d49bb879fe3ddcb, UNASSIGN in 169 msec 2024-11-23T08:26:26,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=189 2024-11-23T08:26:26,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=fddfaf52409cd47bf85d7a5802de7d22, UNASSIGN in 170 msec 2024-11-23T08:26:26,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-11-23T08:26:26,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 173 msec 2024-11-23T08:26:26,430 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350386430"}]},"ts":"1732350386430"} 2024-11-23T08:26:26,431 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-11-23T08:26:26,431 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-11-23T08:26:26,433 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 184 msec 2024-11-23T08:26:26,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-23T08:26:26,568 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T08:26:26,568 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,570 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,570 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,572 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(529): No permissions found in 
hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,574 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:26,574 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:26,575 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/recovered.edits] 2024-11-23T08:26:26,575 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/recovered.edits] 2024-11-23T08:26:26,578 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/cf/3703de055a744a39b1b8f35e2345a955 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/cf/3703de055a744a39b1b8f35e2345a955 2024-11-23T08:26:26,578 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/cf/311013e515da41c788db445a6aa1765b to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/cf/311013e515da41c788db445a6aa1765b 2024-11-23T08:26:26,581 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22/recovered.edits/9.seqid 2024-11-23T08:26:26,581 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb/recovered.edits/9.seqid 2024-11-23T08:26:26,581 DEBUG [HFileArchiver-22 {}] 
backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/fddfaf52409cd47bf85d7a5802de7d22 2024-11-23T08:26:26,581 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportExpiredSnapshot/b44b16770b6526638d49bb879fe3ddcb 2024-11-23T08:26:26,581 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-11-23T08:26:26,584 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,587 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-11-23T08:26:26,590 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-11-23T08:26:26,591 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,591 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-11-23T08:26:26,592 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350386591"}]},"ts":"9223372036854775807"} 2024-11-23T08:26:26,592 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350386591"}]},"ts":"9223372036854775807"} 2024-11-23T08:26:26,595 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T08:26:26,595 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b44b16770b6526638d49bb879fe3ddcb, NAME => 'testtb-testExportExpiredSnapshot,,1732350373565.b44b16770b6526638d49bb879fe3ddcb.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => fddfaf52409cd47bf85d7a5802de7d22, NAME => 'testtb-testExportExpiredSnapshot,1,1732350373565.fddfaf52409cd47bf85d7a5802de7d22.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T08:26:26,595 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
2024-11-23T08:26:26,595 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732350386595"}]},"ts":"9223372036854775807"} 2024-11-23T08:26:26,598 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-11-23T08:26:26,598 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,600 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 31 msec 2024-11-23T08:26:26,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,637 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-23T08:26:26,637 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-23T08:26:26,637 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-23T08:26:26,637 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-23T08:26:26,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,645 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:26,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:26,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:26,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:26,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-11-23T08:26:26,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:26,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:26,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:26,647 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:26,647 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-11-23T08:26:26,647 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T08:26:26,654 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-23T08:26:26,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-11-23T08:26:26,658 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: 
DISABLED 2024-11-23T08:26:26,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-11-23T08:26:26,661 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-23T08:26:26,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-11-23T08:26:26,685 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=810 (was 817), OpenFileDescriptor=799 (was 819), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=639 (was 773), ProcessCount=13 (was 19), AvailableMemoryMB=3119 (was 2486) - AvailableMemoryMB LEAK? - 2024-11-23T08:26:26,685 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-11-23T08:26:26,707 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=810, OpenFileDescriptor=799, MaxFileDescriptor=1048576, SystemLoadAverage=639, ProcessCount=13, AvailableMemoryMB=3119 2024-11-23T08:26:26,707 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-11-23T08:26:26,709 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:26:26,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T08:26:26,710 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:26:26,710 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:26,711 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-11-23T08:26:26,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-23T08:26:26,711 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:26:26,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is 
added to blk_1073742231_1407 (size=412) 2024-11-23T08:26:26,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742231_1407 (size=412) 2024-11-23T08:26:26,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742231_1407 (size=412) 2024-11-23T08:26:26,720 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8f84dab2eb8883adce2e9e2db1fad300, NAME => 'testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:26,720 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 27297fd75e18310ae1af4e0da5af2c71, NAME => 'testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:26,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742233_1409 (size=73) 2024-11-23T08:26:26,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742233_1409 (size=73) 2024-11-23T08:26:26,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742232_1408 (size=73) 2024-11-23T08:26:26,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742233_1409 (size=73) 2024-11-23T08:26:26,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742232_1408 (size=73) 2024-11-23T08:26:26,732 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:26,732 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 27297fd75e18310ae1af4e0da5af2c71, 
disabling compactions & flushes 2024-11-23T08:26:26,732 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 2024-11-23T08:26:26,732 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 2024-11-23T08:26:26,732 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. after waiting 0 ms 2024-11-23T08:26:26,732 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 2024-11-23T08:26:26,732 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 2024-11-23T08:26:26,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742232_1408 (size=73) 2024-11-23T08:26:26,732 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 27297fd75e18310ae1af4e0da5af2c71: Waiting for close lock at 1732350386732Disabling compacts and flushes for region at 1732350386732Disabling writes for close at 1732350386732Writing region close event to WAL at 1732350386732Closed at 1732350386732 2024-11-23T08:26:26,732 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:26,732 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 8f84dab2eb8883adce2e9e2db1fad300, disabling compactions & flushes 2024-11-23T08:26:26,732 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 2024-11-23T08:26:26,732 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 2024-11-23T08:26:26,732 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. after waiting 0 ms 2024-11-23T08:26:26,732 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 2024-11-23T08:26:26,732 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 
2024-11-23T08:26:26,732 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8f84dab2eb8883adce2e9e2db1fad300: Waiting for close lock at 1732350386732Disabling compacts and flushes for region at 1732350386732Disabling writes for close at 1732350386732Writing region close event to WAL at 1732350386732Closed at 1732350386732 2024-11-23T08:26:26,733 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:26:26,733 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732350386733"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350386733"}]},"ts":"1732350386733"} 2024-11-23T08:26:26,734 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732350386733"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350386733"}]},"ts":"1732350386733"} 2024-11-23T08:26:26,736 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T08:26:26,736 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:26:26,736 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350386736"}]},"ts":"1732350386736"} 2024-11-23T08:26:26,737 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-11-23T08:26:26,738 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:26:26,739 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:26:26,739 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:26:26,739 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:26:26,739 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:26:26,739 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:26:26,739 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:26:26,739 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:26:26,739 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:26:26,739 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:26:26,739 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:26:26,739 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8f84dab2eb8883adce2e9e2db1fad300, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=27297fd75e18310ae1af4e0da5af2c71, ASSIGN}] 2024-11-23T08:26:26,740 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8f84dab2eb8883adce2e9e2db1fad300, ASSIGN 2024-11-23T08:26:26,740 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=27297fd75e18310ae1af4e0da5af2c71, ASSIGN 2024-11-23T08:26:26,741 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8f84dab2eb8883adce2e9e2db1fad300, ASSIGN; state=OFFLINE, location=b33d9013c60a,43417,1732350106962; forceNewPlan=false, retain=false 2024-11-23T08:26:26,741 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=27297fd75e18310ae1af4e0da5af2c71, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 2024-11-23T08:26:26,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-23T08:26:26,891 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T08:26:26,892 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=8f84dab2eb8883adce2e9e2db1fad300, regionState=OPENING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:26,892 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=27297fd75e18310ae1af4e0da5af2c71, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:26:26,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=27297fd75e18310ae1af4e0da5af2c71, ASSIGN because future has completed 2024-11-23T08:26:26,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8f84dab2eb8883adce2e9e2db1fad300, ASSIGN because future has completed 2024-11-23T08:26:26,895 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 27297fd75e18310ae1af4e0da5af2c71, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:26:26,895 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:26:27,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-23T08:26:27,051 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 2024-11-23T08:26:27,051 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => 27297fd75e18310ae1af4e0da5af2c71, NAME => 'testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T08:26:27,051 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. service=AccessControlService 2024-11-23T08:26:27,052 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 2024-11-23T08:26:27,052 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:26:27,052 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => 8f84dab2eb8883adce2e9e2db1fad300, NAME => 'testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T08:26:27,052 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,052 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:27,052 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. service=AccessControlService 2024-11-23T08:26:27,052 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,052 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,052 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:26:27,052 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,052 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:27,052 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,052 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,054 INFO [StoreOpener-27297fd75e18310ae1af4e0da5af2c71-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,054 INFO [StoreOpener-8f84dab2eb8883adce2e9e2db1fad300-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,055 INFO [StoreOpener-27297fd75e18310ae1af4e0da5af2c71-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 27297fd75e18310ae1af4e0da5af2c71 columnFamilyName cf 2024-11-23T08:26:27,055 INFO [StoreOpener-8f84dab2eb8883adce2e9e2db1fad300-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8f84dab2eb8883adce2e9e2db1fad300 columnFamilyName cf 2024-11-23T08:26:27,055 DEBUG [StoreOpener-8f84dab2eb8883adce2e9e2db1fad300-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:27,055 DEBUG [StoreOpener-27297fd75e18310ae1af4e0da5af2c71-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:27,056 INFO [StoreOpener-27297fd75e18310ae1af4e0da5af2c71-1 {}] regionserver.HStore(327): Store=27297fd75e18310ae1af4e0da5af2c71/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:26:27,056 INFO [StoreOpener-8f84dab2eb8883adce2e9e2db1fad300-1 {}] regionserver.HStore(327): Store=8f84dab2eb8883adce2e9e2db1fad300/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:26:27,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,057 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,057 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,057 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,057 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,057 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,057 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,058 DEBUG 
[RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,058 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,060 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:26:27,060 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:26:27,061 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened 8f84dab2eb8883adce2e9e2db1fad300; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74056445, jitterRate=0.1035270243883133}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:26:27,061 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,061 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened 27297fd75e18310ae1af4e0da5af2c71; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62046789, jitterRate=-0.07543079555034637}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:26:27,061 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,061 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for 8f84dab2eb8883adce2e9e2db1fad300: Running coprocessor pre-open hook at 1732350387053Writing region info on filesystem at 1732350387053Initializing all the Stores at 1732350387053Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350387053Cleaning up temporary data from old regions at 1732350387057 (+4 ms)Running coprocessor post-open hooks at 1732350387061 (+4 ms)Region opened successfully at 1732350387061 2024-11-23T08:26:27,061 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for 27297fd75e18310ae1af4e0da5af2c71: Running coprocessor pre-open hook at 1732350387052Writing region info 
on filesystem at 1732350387052Initializing all the Stores at 1732350387053 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350387053Cleaning up temporary data from old regions at 1732350387057 (+4 ms)Running coprocessor post-open hooks at 1732350387061 (+4 ms)Region opened successfully at 1732350387061 2024-11-23T08:26:27,062 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300., pid=198, masterSystemTime=1732350387048 2024-11-23T08:26:27,063 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71., pid=199, masterSystemTime=1732350387048 2024-11-23T08:26:27,065 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 2024-11-23T08:26:27,066 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 2024-11-23T08:26:27,066 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=8f84dab2eb8883adce2e9e2db1fad300, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:27,066 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 2024-11-23T08:26:27,066 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 
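Aside: the region open journals above spell out the schema being exercised here, a single column family 'cf' (VERSIONS => '1', BLOOMFILTER => 'ROW', no compression or block encoding) and two regions split at row key '1'. A minimal client-side sketch of creating an equivalent table through the HBase Admin API follows, for orientation only; the test drives table creation through its own harness, and the connection setup shown is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
      // Single column family 'cf' keeping one version, as in the open journal above
      // (ROW bloom filter and no compression are the defaults, so they are not set here).
      TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // One split key produces the two regions seen in this log: ('' -> '1') and ('1' -> '').
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(tdb.build(), splits);
    }
  }
}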
2024-11-23T08:26:27,067 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=27297fd75e18310ae1af4e0da5af2c71, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:26:27,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:26:27,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 27297fd75e18310ae1af4e0da5af2c71, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:26:27,073 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=196 2024-11-23T08:26:27,073 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300, server=b33d9013c60a,43417,1732350106962 in 176 msec 2024-11-23T08:26:27,074 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=197 2024-11-23T08:26:27,074 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure 27297fd75e18310ae1af4e0da5af2c71, server=b33d9013c60a,39649,1732350107049 in 176 msec 2024-11-23T08:26:27,075 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8f84dab2eb8883adce2e9e2db1fad300, ASSIGN in 334 msec 2024-11-23T08:26:27,076 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=195 2024-11-23T08:26:27,076 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=27297fd75e18310ae1af4e0da5af2c71, ASSIGN in 335 msec 2024-11-23T08:26:27,077 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:26:27,077 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350387077"}]},"ts":"1732350387077"} 2024-11-23T08:26:27,078 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-11-23T08:26:27,080 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:26:27,080 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-11-23T08:26:27,083 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T08:26:27,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:27,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:27,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:27,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:27,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:27,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:27,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:27,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:27,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:27,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:27,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:27,150 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:27,151 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 440 msec 2024-11-23T08:26:27,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-23T08:26:27,338 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T08:26:27,338 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-11-23T08:26:27,338 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:26:27,341 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43417 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32778 bytes) of info 2024-11-23T08:26:27,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-11-23T08:26:27,345 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:26:27,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-11-23T08:26:27,345 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T08:26:27,347 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T08:26:27,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350387347 (current time:1732350387347). 
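Aside: the snapshot request logged by MasterRpcServices just above ({ ss=emptySnaptb0-testEmptyExportFileSystemState ... type=FLUSH ttl=0 }) is the server-side trace of an Admin.snapshot call. A hedged sketch of the equivalent client call, reusing the Connection setup assumed in the createTable sketch:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotType;

class TakeEmptySnapshot {
  // 'conn' is an open Connection, created as in the createTable sketch.
  static void run(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
      // FLUSH-type snapshot of the still-empty table; the call returns once the
      // master-side SnapshotProcedure (pid=200 in this log) has completed.
      admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState", tn, SnapshotType.FLUSH);
    }
  }
}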
2024-11-23T08:26:27,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:26:27,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-23T08:26:27,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:26:27,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78685d17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:27,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:27,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:27,349 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:27,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:27,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:27,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@143c0e23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:27,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:27,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:27,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:27,350 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60738, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:27,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ca518de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:27,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:27,351 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:27,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:27,352 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60822, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:27,353 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 2024-11-23T08:26:27,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:27,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:27,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:27,354 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T08:26:27,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@714bad4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:27,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:27,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:27,355 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:27,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:27,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:27,356 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f117d06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:27,356 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:27,356 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:27,356 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:27,356 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60746, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:27,357 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@88e2112, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:27,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:27,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:27,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:27,359 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60826, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T08:26:27,360 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:27,360 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:27,361 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49270, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:27,362 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501. 2024-11-23T08:26:27,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:27,362 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:27,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:27,362 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:26:27,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T08:26:27,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:26:27,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T08:26:27,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-11-23T08:26:27,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-11-23T08:26:27,365 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:26:27,366 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:26:27,368 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:26:27,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742234_1410 (size=185) 2024-11-23T08:26:27,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742234_1410 (size=185) 2024-11-23T08:26:27,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742234_1410 (size=185) 2024-11-23T08:26:27,376 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:26:27,376 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 27297fd75e18310ae1af4e0da5af2c71}] 2024-11-23T08:26:27,377 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,377 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-11-23T08:26:27,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-11-23T08:26:27,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-11-23T08:26:27,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 2024-11-23T08:26:27,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 2024-11-23T08:26:27,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for 8f84dab2eb8883adce2e9e2db1fad300: 2024-11-23T08:26:27,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 27297fd75e18310ae1af4e0da5af2c71: 2024-11-23T08:26:27,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-23T08:26:27,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-23T08:26:27,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:27,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:27,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:27,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:26:27,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:27,529 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:26:27,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742235_1411 (size=76) 2024-11-23T08:26:27,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742236_1412 (size=76) 2024-11-23T08:26:27,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 2024-11-23T08:26:27,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 
2024-11-23T08:26:27,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742235_1411 (size=76) 2024-11-23T08:26:27,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-11-23T08:26:27,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742236_1412 (size=76) 2024-11-23T08:26:27,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-11-23T08:26:27,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742235_1411 (size=76) 2024-11-23T08:26:27,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742236_1412 (size=76) 2024-11-23T08:26:27,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-11-23T08:26:27,542 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-11-23T08:26:27,542 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,542 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,543 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,545 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300 in 167 msec 2024-11-23T08:26:27,546 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=202, resume processing ppid=200 2024-11-23T08:26:27,546 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 27297fd75e18310ae1af4e0da5af2c71 in 167 msec 2024-11-23T08:26:27,547 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:26:27,548 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState 
table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:26:27,548 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:26:27,549 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:27,549 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:27,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742237_1413 (size=567) 2024-11-23T08:26:27,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742237_1413 (size=567) 2024-11-23T08:26:27,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742237_1413 (size=567) 2024-11-23T08:26:27,563 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:26:27,567 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:26:27,568 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:27,569 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:26:27,569 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-11-23T08:26:27,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 206 msec 
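Aside: pid=200 has now moved the snapshot out of .hbase-snapshot/.tmp, so it is visible to clients. A small hedged check, again assuming the same Connection setup as the earlier sketches:

import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;

class ListCompletedSnapshots {
  static void run(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      // emptySnaptb0-testEmptyExportFileSystemState should be listed here once
      // SNAPSHOT_COMPLETE_SNAPSHOT has run, as in the entries above.
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      for (SnapshotDescription sd : snapshots) {
        System.out.println(sd.getName());
      }
    }
  }
}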
2024-11-23T08:26:27,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-11-23T08:26:27,677 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T08:26:27,681 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='034d096f6ba873f4fb5732bbea51e22fd', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:26:27,683 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='1c2d650df496c75e1c4f948a3a227d0c4', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:27,683 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='2b73163f75dd050d6182656f42a0a770e', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:27,684 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='369a7d713c05b94c6ede6e08d109eac5b', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:27,685 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='42df99f421edd97be9bc8a15c66aad6a3', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:27,686 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='5828c4bb04faffd3f458d4679d4942afd', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:27,686 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='6db5cedd0fca5a97192b4f16572df3b5a', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:27,687 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43417 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. with WAL disabled. Data may be lost in the event of a crash. 
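The HRegion(8528) warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") are emitted when a mutation arrives with Durability.SKIP_WAL, which appears to be how the test loads its rows quickly at the cost of crash safety. A hedged sketch of a write that would produce this warning (the helper, row key and value are illustrative; the table name, family 'cf' and qualifier 'q' match the log):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      // Hypothetical helper: one unlogged write against the test table.
      static void putWithoutWal(Connection conn) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
          Put put = new Put(Bytes.toBytes("0498cd5a27e887c02af9df787e752023"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"))
              .setDurability(Durability.SKIP_WAL); // no WAL edit is written, hence the warning
          table.put(put);
        }
      }
    }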
2024-11-23T08:26:27,688 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39649 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:26:27,689 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T08:26:27,691 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-23T08:26:27,691 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 2024-11-23T08:26:27,691 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:26:27,693 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T08:26:27,698 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T08:26:27,703 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T08:26:27,705 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T08:26:27,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350387705 (current time:1732350387705). 
2024-11-23T08:26:27,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:26:27,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-23T08:26:27,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:26:27,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30cce978, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:27,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:27,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:27,706 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:27,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:27,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:27,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76e03163, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:27,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:27,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:27,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:27,708 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60774, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:27,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353a2f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:27,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:27,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:27,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:27,710 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60828, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:27,711 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:26:27,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:27,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:27,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:27,711 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T08:26:27,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f2427e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:27,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:27,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:27,713 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:27,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:27,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:27,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2efe8454, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:27,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:27,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:27,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:27,714 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60786, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:27,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f13f07c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:27,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:27,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:27,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:27,717 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60840, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T08:26:27,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:27,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:27,721 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49284, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:27,722 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:26:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:27,722 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:27,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:27,723 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:26:27,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T08:26:27,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:26:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T08:26:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-11-23T08:26:27,725 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:26:27,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-23T08:26:27,726 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:26:27,728 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:26:27,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742238_1414 (size=180) 2024-11-23T08:26:27,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742238_1414 (size=180) 2024-11-23T08:26:27,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742238_1414 (size=180) 2024-11-23T08:26:27,734 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:26:27,734 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 27297fd75e18310ae1af4e0da5af2c71}] 2024-11-23T08:26:27,735 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,735 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-23T08:26:27,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-11-23T08:26:27,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-11-23T08:26:27,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 2024-11-23T08:26:27,887 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing 27297fd75e18310ae1af4e0da5af2c71 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-23T08:26:27,888 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 
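Because the snapshot is type=FLUSH, each SnapshotRegionCallable first flushes its region's memstore (the HRegion(2902) "Flushing ..." entries above and below) so the snapshot can reference on-disk HFiles instead of in-memory data. An explicit client-side flush of the same table would be the rough equivalent; a small sketch, with the Admin handle assumed to come from a Connection as in the earlier sketch:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    class FlushSketch {
      // Ask every region of the table to flush its memstore to an HFile;
      // the FLUSH-type snapshot performs the per-region equivalent internally.
      static void flushTestTable(Admin admin) throws IOException {
        admin.flush(TableName.valueOf("testtb-testEmptyExportFileSystemState"));
      }
    }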
2024-11-23T08:26:27,888 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing 8f84dab2eb8883adce2e9e2db1fad300 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-23T08:26:27,903 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/.tmp/cf/adc822315b774315828d740424330043 is 71, key is 10e5f8c05e5537ecdc29fbd35b15c5f6/cf:q/1732350387688/Put/seqid=0 2024-11-23T08:26:27,903 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/.tmp/cf/2753043d64ee461cb765680e19ce66df is 71, key is 0498cd5a27e887c02af9df787e752023/cf:q/1732350387687/Put/seqid=0 2024-11-23T08:26:27,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742239_1415 (size=5288) 2024-11-23T08:26:27,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742239_1415 (size=5288) 2024-11-23T08:26:27,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742239_1415 (size=5288) 2024-11-23T08:26:27,916 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/.tmp/cf/2753043d64ee461cb765680e19ce66df 2024-11-23T08:26:27,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742240_1416 (size=8324) 2024-11-23T08:26:27,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742240_1416 (size=8324) 2024-11-23T08:26:27,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742240_1416 (size=8324) 2024-11-23T08:26:27,925 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/.tmp/cf/2753043d64ee461cb765680e19ce66df as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/cf/2753043d64ee461cb765680e19ce66df 2024-11-23T08:26:27,925 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/.tmp/cf/adc822315b774315828d740424330043 2024-11-23T08:26:27,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/.tmp/cf/adc822315b774315828d740424330043 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/cf/adc822315b774315828d740424330043 2024-11-23T08:26:27,931 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/cf/2753043d64ee461cb765680e19ce66df, entries=3, sequenceid=6, filesize=5.2 K 2024-11-23T08:26:27,932 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 8f84dab2eb8883adce2e9e2db1fad300 in 44ms, sequenceid=6, compaction requested=false 2024-11-23T08:26:27,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-11-23T08:26:27,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for 8f84dab2eb8883adce2e9e2db1fad300: 2024-11-23T08:26:27,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-23T08:26:27,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:27,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:27,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/cf/2753043d64ee461cb765680e19ce66df] hfiles 2024-11-23T08:26:27,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/cf/2753043d64ee461cb765680e19ce66df for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:27,936 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/cf/adc822315b774315828d740424330043, entries=47, sequenceid=6, filesize=8.1 K 2024-11-23T08:26:27,937 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 27297fd75e18310ae1af4e0da5af2c71 in 50ms, sequenceid=6, compaction requested=false 2024-11-23T08:26:27,937 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for 27297fd75e18310ae1af4e0da5af2c71: 2024-11-23T08:26:27,937 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-23T08:26:27,937 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:27,937 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:27,937 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/cf/adc822315b774315828d740424330043] hfiles 2024-11-23T08:26:27,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/cf/adc822315b774315828d740424330043 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:27,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742241_1417 (size=115) 2024-11-23T08:26:27,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742241_1417 (size=115) 2024-11-23T08:26:27,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742241_1417 (size=115) 2024-11-23T08:26:27,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 
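At this point the per-region snapshot manifests for snaptb0-testEmptyExportFileSystemState reference the two flushed HFiles (2753043d64ee461cb765680e19ce66df and adc822315b774315828d740424330043); the earlier emptySnaptb0 snapshot is already complete, and snaptb0 becomes visible to clients once pid=203 reaches SNAPSHOT_COMPLETE_SNAPSHOT below. A hedged sketch of listing what a client would see at that point (the printing is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    class ListSnapshotsSketch {
      // Prints every completed snapshot the cluster exposes, e.g.
      // emptySnaptb0-testEmptyExportFileSystemState and snaptb0-testEmptyExportFileSystemState.
      static void listAll(Admin admin) throws IOException {
        for (SnapshotDescription s : admin.listSnapshots()) {
          System.out.println(s.getName() + " table=" + s.getTableName());
        }
      }
    }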
2024-11-23T08:26:27,949 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-11-23T08:26:27,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-11-23T08:26:27,949 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,949 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:27,952 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300 in 216 msec 2024-11-23T08:26:27,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742242_1418 (size=115) 2024-11-23T08:26:27,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742242_1418 (size=115) 2024-11-23T08:26:27,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742242_1418 (size=115) 2024-11-23T08:26:27,960 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 
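Once pid=203 finishes below, TestExportSnapshot turns to the export itself: the "HDFS export destination path" record and the long run of TableMapReduceUtil(972) "For class ..., using jar ..." records are the ExportSnapshot MapReduce job being set up, with the HBase and Hadoop jars resolved as job dependencies. For reference, a hedged sketch of driving the same export programmatically; the flags mirror the tool's documented command-line usage, and the destination path is the one printed in the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Roughly: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <hdfs path>
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(), new String[] {
            "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "-copy-to", "hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350388048"
        });
        System.exit(rc);
      }
    }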
2024-11-23T08:26:27,960 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-11-23T08:26:27,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-11-23T08:26:27,960 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,961 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:27,963 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203 2024-11-23T08:26:27,963 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 27297fd75e18310ae1af4e0da5af2c71 in 227 msec 2024-11-23T08:26:27,963 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:26:27,964 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:26:27,964 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:26:27,964 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:27,965 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:27,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742243_1419 (size=645) 2024-11-23T08:26:27,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742243_1419 (size=645) 2024-11-23T08:26:27,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742243_1419 (size=645) 2024-11-23T08:26:27,980 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:26:27,985 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:26:27,985 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:27,986 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:26:27,986 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-11-23T08:26:27,988 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 263 msec 2024-11-23T08:26:28,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-23T08:26:28,048 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T08:26:28,048 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350388048 2024-11-23T08:26:28,048 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34105, tgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350388048, rawTgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350388048, srcFsUri=hdfs://localhost:34105, srcDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:28,077 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34105, inputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:28,077 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350388048, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350388048/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:28,078 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T08:26:28,082 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350388048/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:28,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742244_1420 (size=567) 2024-11-23T08:26:28,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742244_1420 (size=567) 2024-11-23T08:26:28,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742244_1420 (size=567) 2024-11-23T08:26:28,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742245_1421 (size=185) 2024-11-23T08:26:28,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742245_1421 (size=185) 2024-11-23T08:26:28,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742245_1421 (size=185) 2024-11-23T08:26:28,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:28,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:28,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:29,165 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-5597827904969429915.jar 2024-11-23T08:26:29,166 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:29,166 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:29,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-8370950459524776011.jar 2024-11-23T08:26:29,247 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:29,247 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:29,247 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:29,248 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:29,248 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:29,248 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:29,249 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T08:26:29,249 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T08:26:29,249 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T08:26:29,250 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T08:26:29,250 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T08:26:29,250 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T08:26:29,250 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T08:26:29,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T08:26:29,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T08:26:29,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T08:26:29,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T08:26:29,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:26:29,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:26:29,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:26:29,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:26:29,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:26:29,253 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:26:29,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:26:29,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742246_1422 (size=131440) 2024-11-23T08:26:29,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742246_1422 (size=131440) 2024-11-23T08:26:29,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742246_1422 (size=131440) 2024-11-23T08:26:29,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742247_1423 (size=4188619) 2024-11-23T08:26:29,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742247_1423 (size=4188619) 2024-11-23T08:26:29,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742247_1423 (size=4188619) 2024-11-23T08:26:29,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742248_1424 (size=1323991) 2024-11-23T08:26:29,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742248_1424 (size=1323991) 2024-11-23T08:26:29,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742248_1424 (size=1323991) 2024-11-23T08:26:29,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742249_1425 (size=903733) 2024-11-23T08:26:29,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742249_1425 (size=903733) 2024-11-23T08:26:29,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742249_1425 (size=903733) 2024-11-23T08:26:29,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742250_1426 (size=8360083) 2024-11-23T08:26:29,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742250_1426 (size=8360083) 2024-11-23T08:26:29,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742250_1426 (size=8360083) 2024-11-23T08:26:29,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742251_1427 (size=1877034) 2024-11-23T08:26:29,480 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742251_1427 (size=1877034) 2024-11-23T08:26:29,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742251_1427 (size=1877034) 2024-11-23T08:26:29,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742252_1428 (size=77835) 2024-11-23T08:26:29,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742252_1428 (size=77835) 2024-11-23T08:26:29,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742252_1428 (size=77835) 2024-11-23T08:26:29,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742253_1429 (size=440957) 2024-11-23T08:26:29,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742253_1429 (size=440957) 2024-11-23T08:26:29,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742253_1429 (size=440957) 2024-11-23T08:26:29,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742254_1430 (size=30949) 2024-11-23T08:26:29,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742254_1430 (size=30949) 2024-11-23T08:26:29,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742254_1430 (size=30949) 2024-11-23T08:26:29,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742255_1431 (size=1597347) 2024-11-23T08:26:29,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742255_1431 (size=1597347) 2024-11-23T08:26:29,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742255_1431 (size=1597347) 2024-11-23T08:26:29,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742256_1432 (size=4695811) 2024-11-23T08:26:29,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742256_1432 (size=4695811) 2024-11-23T08:26:29,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742256_1432 (size=4695811) 2024-11-23T08:26:29,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742257_1433 (size=232957) 2024-11-23T08:26:29,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742257_1433 (size=232957) 2024-11-23T08:26:29,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742257_1433 (size=232957) 2024-11-23T08:26:29,711 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742258_1434 (size=127628) 2024-11-23T08:26:29,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742258_1434 (size=127628) 2024-11-23T08:26:29,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742258_1434 (size=127628) 2024-11-23T08:26:29,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742259_1435 (size=20406) 2024-11-23T08:26:29,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742259_1435 (size=20406) 2024-11-23T08:26:29,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742259_1435 (size=20406) 2024-11-23T08:26:29,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742260_1436 (size=5175431) 2024-11-23T08:26:29,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742260_1436 (size=5175431) 2024-11-23T08:26:29,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742260_1436 (size=5175431) 2024-11-23T08:26:29,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742261_1437 (size=217634) 2024-11-23T08:26:29,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742261_1437 (size=217634) 2024-11-23T08:26:29,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742261_1437 (size=217634) 2024-11-23T08:26:29,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742262_1438 (size=1832290) 2024-11-23T08:26:29,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742262_1438 (size=1832290) 2024-11-23T08:26:29,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742262_1438 (size=1832290) 2024-11-23T08:26:29,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742263_1439 (size=322274) 2024-11-23T08:26:29,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742263_1439 (size=322274) 2024-11-23T08:26:29,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742263_1439 (size=322274) 2024-11-23T08:26:29,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742264_1440 (size=503880) 2024-11-23T08:26:29,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742264_1440 (size=503880) 2024-11-23T08:26:29,869 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742264_1440 (size=503880) 2024-11-23T08:26:29,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742265_1441 (size=6424742) 2024-11-23T08:26:29,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742265_1441 (size=6424742) 2024-11-23T08:26:29,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742265_1441 (size=6424742) 2024-11-23T08:26:29,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742266_1442 (size=29229) 2024-11-23T08:26:29,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742266_1442 (size=29229) 2024-11-23T08:26:29,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742266_1442 (size=29229) 2024-11-23T08:26:29,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742267_1443 (size=24096) 2024-11-23T08:26:29,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742267_1443 (size=24096) 2024-11-23T08:26:29,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742267_1443 (size=24096) 2024-11-23T08:26:29,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742268_1444 (size=111872) 2024-11-23T08:26:29,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742268_1444 (size=111872) 2024-11-23T08:26:29,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742268_1444 (size=111872) 2024-11-23T08:26:30,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742269_1445 (size=45609) 2024-11-23T08:26:30,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742269_1445 (size=45609) 2024-11-23T08:26:30,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742269_1445 (size=45609) 2024-11-23T08:26:30,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742270_1446 (size=136454) 2024-11-23T08:26:30,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742270_1446 (size=136454) 2024-11-23T08:26:30,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742270_1446 (size=136454) 2024-11-23T08:26:30,049 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
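[Editor's note, not part of the captured log] The "For class ..., using jar ..." DEBUG lines above are emitted while TableMapReduceUtil resolves dependency jars for the export MapReduce job, and the JobResourceUploader warning fires because no job jar was set. A minimal, illustrative Java sketch of the calls that typically produce this output; the class name DependencyJarSetup and the job name are hypothetical and not taken from this test run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarSetup {
      public static Job newJob(Configuration base) throws Exception {
        Configuration conf = HBaseConfiguration.create(base);
        Job job = Job.getInstance(conf, "export-snapshot-sketch");
        // Ships the jars containing the HBase, ZooKeeper, metrics and
        // OpenTelemetry classes with the job; each resolved class/jar pair is
        // logged at DEBUG by TableMapReduceUtil, as in the lines above.
        TableMapReduceUtil.addDependencyJars(job);
        // Setting a job jar is what avoids the "No job jar file set" warning
        // from JobResourceUploader.
        job.setJarByClass(DependencyJarSetup.class);
        return job;
      }
    }
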
2024-11-23T08:26:30,052 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-11-23T08:26:30,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742271_1447 (size=7) 2024-11-23T08:26:30,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742271_1447 (size=7) 2024-11-23T08:26:30,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742271_1447 (size=7) 2024-11-23T08:26:30,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742272_1448 (size=10) 2024-11-23T08:26:30,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742272_1448 (size=10) 2024-11-23T08:26:30,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742272_1448 (size=10) 2024-11-23T08:26:30,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742273_1449 (size=303903) 2024-11-23T08:26:30,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742273_1449 (size=303903) 2024-11-23T08:26:30,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742273_1449 (size=303903) 2024-11-23T08:26:30,154 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T08:26:30,154 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T08:26:30,167 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0008_000001 (auth:SIMPLE) from 127.0.0.1:45958 2024-11-23T08:26:31,961 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:26:35,513 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0008_000001 (auth:SIMPLE) from 127.0.0.1:42666 2024-11-23T08:26:35,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742274_1450 (size=349577) 2024-11-23T08:26:35,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742274_1450 (size=349577) 2024-11-23T08:26:35,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742274_1450 (size=349577) 2024-11-23T08:26:36,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-23T08:26:36,247 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-23T08:26:36,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-23T08:26:36,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742275_1451 (size=8568) 2024-11-23T08:26:36,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742275_1451 (size=8568) 2024-11-23T08:26:36,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742275_1451 (size=8568) 2024-11-23T08:26:36,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742276_1452 (size=460) 2024-11-23T08:26:36,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742276_1452 (size=460) 2024-11-23T08:26:36,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742276_1452 (size=460) 2024-11-23T08:26:36,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742277_1453 (size=8568) 2024-11-23T08:26:36,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742277_1453 (size=8568) 2024-11-23T08:26:36,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742277_1453 (size=8568) 2024-11-23T08:26:36,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37117 is added to blk_1073742278_1454 (size=349577) 2024-11-23T08:26:36,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742278_1454 (size=349577) 2024-11-23T08:26:36,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742278_1454 (size=349577) 2024-11-23T08:26:38,266 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T08:26:38,267 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T08:26:38,272 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:38,272 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T08:26:38,273 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T08:26:38,273 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:38,273 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-23T08:26:38,273 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-23T08:26:38,273 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350388048/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350388048/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:38,274 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350388048/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-23T08:26:38,274 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350388048/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-23T08:26:38,278 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure 
table=testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-23T08:26:38,280 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350398280"}]},"ts":"1732350398280"} 2024-11-23T08:26:38,281 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-11-23T08:26:38,281 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-11-23T08:26:38,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-11-23T08:26:38,284 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8f84dab2eb8883adce2e9e2db1fad300, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=27297fd75e18310ae1af4e0da5af2c71, UNASSIGN}] 2024-11-23T08:26:38,284 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=27297fd75e18310ae1af4e0da5af2c71, UNASSIGN 2024-11-23T08:26:38,284 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8f84dab2eb8883adce2e9e2db1fad300, UNASSIGN 2024-11-23T08:26:38,285 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=27297fd75e18310ae1af4e0da5af2c71, regionState=CLOSING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:26:38,285 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=8f84dab2eb8883adce2e9e2db1fad300, regionState=CLOSING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:38,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=27297fd75e18310ae1af4e0da5af2c71, UNASSIGN because future has completed 2024-11-23T08:26:38,286 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:26:38,286 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 27297fd75e18310ae1af4e0da5af2c71, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:26:38,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8f84dab2eb8883adce2e9e2db1fad300, UNASSIGN because future has completed 2024-11-23T08:26:38,287 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:26:38,287 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:26:38,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-23T08:26:38,438 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:38,438 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:26:38,438 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing 27297fd75e18310ae1af4e0da5af2c71, disabling compactions & flushes 2024-11-23T08:26:38,438 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 2024-11-23T08:26:38,438 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 2024-11-23T08:26:38,438 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. after waiting 0 ms 2024-11-23T08:26:38,439 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 2024-11-23T08:26:38,440 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:38,440 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:26:38,440 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing 8f84dab2eb8883adce2e9e2db1fad300, disabling compactions & flushes 2024-11-23T08:26:38,441 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 
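[Editor's note, not part of the captured log] The UNASSIGN and region-close steps logged here belong to DisableTableProcedure pid=206. As an illustrative sketch only (not the test's own code), the client-side call that starts such a procedure looks roughly like this:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          if (admin.isTableEnabled(table)) {
            // Blocks until the master reports the DisableTableProcedure done,
            // i.e. both regions are CLOSED and the table state in hbase:meta
            // has moved DISABLING -> DISABLED, as in the entries above.
            admin.disableTable(table);
          }
        }
      }
    }
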
2024-11-23T08:26:38,441 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 2024-11-23T08:26:38,441 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. after waiting 0 ms 2024-11-23T08:26:38,441 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 2024-11-23T08:26:38,442 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:26:38,443 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:26:38,443 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71. 2024-11-23T08:26:38,443 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for 27297fd75e18310ae1af4e0da5af2c71: Waiting for close lock at 1732350398438Running coprocessor pre-close hooks at 1732350398438Disabling compacts and flushes for region at 1732350398438Disabling writes for close at 1732350398438Writing region close event to WAL at 1732350398439 (+1 ms)Running coprocessor post-close hooks at 1732350398443 (+4 ms)Closed at 1732350398443 2024-11-23T08:26:38,445 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed 27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:38,445 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=27297fd75e18310ae1af4e0da5af2c71, regionState=CLOSED 2024-11-23T08:26:38,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 27297fd75e18310ae1af4e0da5af2c71, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:26:38,447 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:26:38,447 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:26:38,447 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 
{event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300. 2024-11-23T08:26:38,447 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for 8f84dab2eb8883adce2e9e2db1fad300: Waiting for close lock at 1732350398440Running coprocessor pre-close hooks at 1732350398440Disabling compacts and flushes for region at 1732350398440Disabling writes for close at 1732350398441 (+1 ms)Writing region close event to WAL at 1732350398441Running coprocessor post-close hooks at 1732350398447 (+6 ms)Closed at 1732350398447 2024-11-23T08:26:38,451 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed 8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:38,451 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=8f84dab2eb8883adce2e9e2db1fad300, regionState=CLOSED 2024-11-23T08:26:38,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=209 2024-11-23T08:26:38,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure 27297fd75e18310ae1af4e0da5af2c71, server=b33d9013c60a,39649,1732350107049 in 161 msec 2024-11-23T08:26:38,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=27297fd75e18310ae1af4e0da5af2c71, UNASSIGN in 168 msec 2024-11-23T08:26:38,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:26:38,454 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=208 2024-11-23T08:26:38,454 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure 8f84dab2eb8883adce2e9e2db1fad300, server=b33d9013c60a,43417,1732350106962 in 166 msec 2024-11-23T08:26:38,456 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=207 2024-11-23T08:26:38,456 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=8f84dab2eb8883adce2e9e2db1fad300, UNASSIGN in 170 msec 2024-11-23T08:26:38,457 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-11-23T08:26:38,457 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 174 msec 2024-11-23T08:26:38,458 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350398458"}]},"ts":"1732350398458"} 2024-11-23T08:26:38,460 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-11-23T08:26:38,460 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-11-23T08:26:38,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 182 msec 2024-11-23T08:26:38,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-23T08:26:38,597 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T08:26:38,598 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,599 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,600 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,602 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,603 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:38,603 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:38,605 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/recovered.edits] 2024-11-23T08:26:38,605 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/cf, FileablePath, 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/recovered.edits] 2024-11-23T08:26:38,608 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/cf/2753043d64ee461cb765680e19ce66df to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/cf/2753043d64ee461cb765680e19ce66df 2024-11-23T08:26:38,608 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/cf/adc822315b774315828d740424330043 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/cf/adc822315b774315828d740424330043 2024-11-23T08:26:38,610 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71/recovered.edits/9.seqid 2024-11-23T08:26:38,610 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300/recovered.edits/9.seqid 2024-11-23T08:26:38,610 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/27297fd75e18310ae1af4e0da5af2c71 2024-11-23T08:26:38,610 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testEmptyExportFileSystemState/8f84dab2eb8883adce2e9e2db1fad300 2024-11-23T08:26:38,610 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-11-23T08:26:38,612 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,614 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-11-23T08:26:38,616 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 
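[Editor's note, not part of the captured log] The HFileArchiver moves above are performed by DeleteTableProcedure pid=212, and the snapshot deletions a few entries below are separate master RPCs. A rough, hypothetical sketch of the corresponding Admin calls (table and snapshot names are taken from the log; the class CleanupSketch itself is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          // deleteTable() launches a DeleteTableProcedure on the master; the
          // HFileArchiver then moves the region files into the archive
          // directory before the table is removed from hbase:meta.
          admin.deleteTable(table);
          // Dropping the snapshots afterwards corresponds to the
          // 'delete name: "..." type: DISABLED' RPCs later in the log.
          admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
        }
      }
    }
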
2024-11-23T08:26:38,617 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,617 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-11-23T08:26:38,617 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350398617"}]},"ts":"9223372036854775807"} 2024-11-23T08:26:38,617 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350398617"}]},"ts":"9223372036854775807"} 2024-11-23T08:26:38,619 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T08:26:38,619 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 8f84dab2eb8883adce2e9e2db1fad300, NAME => 'testtb-testEmptyExportFileSystemState,,1732350386708.8f84dab2eb8883adce2e9e2db1fad300.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 27297fd75e18310ae1af4e0da5af2c71, NAME => 'testtb-testEmptyExportFileSystemState,1,1732350386708.27297fd75e18310ae1af4e0da5af2c71.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T08:26:38,619 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 2024-11-23T08:26:38,619 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732350398619"}]},"ts":"9223372036854775807"} 2024-11-23T08:26:38,621 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-11-23T08:26:38,622 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,623 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 24 msec 2024-11-23T08:26:38,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,669 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-23T08:26:38,669 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-23T08:26:38,669 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-23T08:26:38,669 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-23T08:26:38,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:38,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:38,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:38,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:38,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-11-23T08:26:38,680 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 
\x04 2024-11-23T08:26:38,680 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:38,681 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-11-23T08:26:38,681 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:38,681 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:38,681 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T08:26:38,685 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-23T08:26:38,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:38,688 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-23T08:26:38,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-11-23T08:26:38,708 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=820 (was 810) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35573 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 136077) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:33592 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:40303 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_853835401_1 at /127.0.0.1:33552 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:35573 from appattempt_1732350113698_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_853835401_1 at /127.0.0.1:43300 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40303 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:43482 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6807 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) 
app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:43354 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=819 (was 799) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=617 (was 639), ProcessCount=19 (was 13) - ProcessCount LEAK? 
-, AvailableMemoryMB=2304 (was 3119) 2024-11-23T08:26:38,708 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=820 is superior to 500 2024-11-23T08:26:38,722 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=820, OpenFileDescriptor=819, MaxFileDescriptor=1048576, SystemLoadAverage=617, ProcessCount=19, AvailableMemoryMB=2304 2024-11-23T08:26:38,723 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=820 is superior to 500 2024-11-23T08:26:38,724 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:26:38,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-11-23T08:26:38,725 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:26:38,725 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:38,725 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-11-23T08:26:38,726 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:26:38,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-23T08:26:38,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742279_1455 (size=404) 2024-11-23T08:26:38,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742279_1455 (size=404) 2024-11-23T08:26:38,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742279_1455 (size=404) 2024-11-23T08:26:38,733 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7f52c75517f0bb3a74bf1656270f4674, NAME => 'testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:38,733 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 82159aebc53b39a493b146f912661aa3, NAME => 'testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:38,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742281_1457 (size=65) 2024-11-23T08:26:38,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742281_1457 (size=65) 2024-11-23T08:26:38,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742281_1457 (size=65) 2024-11-23T08:26:38,739 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:38,739 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 7f52c75517f0bb3a74bf1656270f4674, disabling compactions & flushes 2024-11-23T08:26:38,739 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 2024-11-23T08:26:38,739 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 2024-11-23T08:26:38,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742280_1456 (size=65) 2024-11-23T08:26:38,740 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. after waiting 0 ms 2024-11-23T08:26:38,740 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 2024-11-23T08:26:38,740 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 
2024-11-23T08:26:38,740 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7f52c75517f0bb3a74bf1656270f4674: Waiting for close lock at 1732350398739Disabling compacts and flushes for region at 1732350398739Disabling writes for close at 1732350398740 (+1 ms)Writing region close event to WAL at 1732350398740Closed at 1732350398740 2024-11-23T08:26:38,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742280_1456 (size=65) 2024-11-23T08:26:38,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742280_1456 (size=65) 2024-11-23T08:26:38,740 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:38,740 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 82159aebc53b39a493b146f912661aa3, disabling compactions & flushes 2024-11-23T08:26:38,740 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:38,740 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:38,740 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. after waiting 0 ms 2024-11-23T08:26:38,741 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:38,741 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 
2024-11-23T08:26:38,741 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 82159aebc53b39a493b146f912661aa3: Waiting for close lock at 1732350398740Disabling compacts and flushes for region at 1732350398740Disabling writes for close at 1732350398740Writing region close event to WAL at 1732350398741 (+1 ms)Closed at 1732350398741 2024-11-23T08:26:38,741 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:26:38,742 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732350398742"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350398742"}]},"ts":"1732350398742"} 2024-11-23T08:26:38,742 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732350398742"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350398742"}]},"ts":"1732350398742"} 2024-11-23T08:26:38,743 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T08:26:38,744 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:26:38,744 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350398744"}]},"ts":"1732350398744"} 2024-11-23T08:26:38,745 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-11-23T08:26:38,746 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:26:38,747 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:26:38,747 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:26:38,747 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:26:38,747 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:26:38,747 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:26:38,747 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:26:38,747 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:26:38,747 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:26:38,747 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:26:38,747 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:26:38,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7f52c75517f0bb3a74bf1656270f4674, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, ASSIGN}] 2024-11-23T08:26:38,748 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, ASSIGN 2024-11-23T08:26:38,748 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7f52c75517f0bb3a74bf1656270f4674, ASSIGN 2024-11-23T08:26:38,748 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7f52c75517f0bb3a74bf1656270f4674, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 2024-11-23T08:26:38,748 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, ASSIGN; state=OFFLINE, location=b33d9013c60a,43417,1732350106962; forceNewPlan=false, retain=false 2024-11-23T08:26:38,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-23T08:26:38,899 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T08:26:38,899 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=7f52c75517f0bb3a74bf1656270f4674, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:26:38,899 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=82159aebc53b39a493b146f912661aa3, regionState=OPENING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:38,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7f52c75517f0bb3a74bf1656270f4674, ASSIGN because future has completed 2024-11-23T08:26:38,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7f52c75517f0bb3a74bf1656270f4674, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:26:38,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, ASSIGN because future has completed 2024-11-23T08:26:38,902 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 82159aebc53b39a493b146f912661aa3, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:26:39,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-23T08:26:39,055 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 2024-11-23T08:26:39,055 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => 7f52c75517f0bb3a74bf1656270f4674, NAME => 'testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T08:26:39,055 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. service=AccessControlService 2024-11-23T08:26:39,055 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:39,055 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:26:39,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => 82159aebc53b39a493b146f912661aa3, NAME => 'testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T08:26:39,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:39,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. service=AccessControlService 2024-11-23T08:26:39,056 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T08:26:39,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:39,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,056 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,057 INFO [StoreOpener-7f52c75517f0bb3a74bf1656270f4674-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,057 INFO [StoreOpener-82159aebc53b39a493b146f912661aa3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,058 INFO [StoreOpener-7f52c75517f0bb3a74bf1656270f4674-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7f52c75517f0bb3a74bf1656270f4674 columnFamilyName cf 2024-11-23T08:26:39,058 INFO [StoreOpener-82159aebc53b39a493b146f912661aa3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 82159aebc53b39a493b146f912661aa3 columnFamilyName cf 2024-11-23T08:26:39,058 DEBUG [StoreOpener-7f52c75517f0bb3a74bf1656270f4674-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:39,058 DEBUG [StoreOpener-82159aebc53b39a493b146f912661aa3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:39,058 INFO [StoreOpener-7f52c75517f0bb3a74bf1656270f4674-1 {}] regionserver.HStore(327): Store=7f52c75517f0bb3a74bf1656270f4674/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:26:39,058 INFO [StoreOpener-82159aebc53b39a493b146f912661aa3-1 {}] regionserver.HStore(327): Store=82159aebc53b39a493b146f912661aa3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:26:39,058 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,058 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,059 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,059 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,059 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,059 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,059 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,059 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,059 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,059 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,060 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 
{event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,060 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,062 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:26:39,062 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:26:39,062 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened 82159aebc53b39a493b146f912661aa3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59692834, jitterRate=-0.11050745844841003}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:26:39,062 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened 7f52c75517f0bb3a74bf1656270f4674; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67636852, jitterRate=0.007867634296417236}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:26:39,062 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,062 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,063 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for 7f52c75517f0bb3a74bf1656270f4674: Running coprocessor pre-open hook at 1732350399056Writing region info on filesystem at 1732350399056Initializing all the Stores at 1732350399057 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350399057Cleaning up temporary data from old regions at 1732350399059 (+2 ms)Running coprocessor post-open hooks at 1732350399062 (+3 ms)Region opened successfully at 1732350399062 2024-11-23T08:26:39,063 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for 82159aebc53b39a493b146f912661aa3: Running coprocessor pre-open hook at 1732350399056Writing region info on filesystem at 1732350399056Initializing all the 
Stores at 1732350399057 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350399057Cleaning up temporary data from old regions at 1732350399059 (+2 ms)Running coprocessor post-open hooks at 1732350399062 (+3 ms)Region opened successfully at 1732350399062 2024-11-23T08:26:39,063 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674., pid=216, masterSystemTime=1732350399052 2024-11-23T08:26:39,063 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3., pid=217, masterSystemTime=1732350399054 2024-11-23T08:26:39,065 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:39,065 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:39,078 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=82159aebc53b39a493b146f912661aa3, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:39,078 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 2024-11-23T08:26:39,078 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 
2024-11-23T08:26:39,079 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=7f52c75517f0bb3a74bf1656270f4674, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:26:39,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 82159aebc53b39a493b146f912661aa3, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:26:39,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7f52c75517f0bb3a74bf1656270f4674, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:26:39,082 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=215 2024-11-23T08:26:39,082 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=214 2024-11-23T08:26:39,083 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure 7f52c75517f0bb3a74bf1656270f4674, server=b33d9013c60a,39649,1732350107049 in 180 msec 2024-11-23T08:26:39,083 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure 82159aebc53b39a493b146f912661aa3, server=b33d9013c60a,43417,1732350106962 in 178 msec 2024-11-23T08:26:39,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, ASSIGN in 335 msec 2024-11-23T08:26:39,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=214, resume processing ppid=213 2024-11-23T08:26:39,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7f52c75517f0bb3a74bf1656270f4674, ASSIGN in 336 msec 2024-11-23T08:26:39,085 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:26:39,085 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350399085"}]},"ts":"1732350399085"} 2024-11-23T08:26:39,086 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-11-23T08:26:39,087 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:26:39,087 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-11-23T08:26:39,090 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 
2024-11-23T08:26:39,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:39,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:39,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:39,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:26:39,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:39,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:39,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:39,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:39,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:39,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:39,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:39,140 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-23T08:26:39,140 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 414 msec 2024-11-23T08:26:39,358 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-23T08:26:39,358 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T08:26:39,358 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-11-23T08:26:39,359 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:26:39,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43417 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32829 bytes) of info 2024-11-23T08:26:39,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-11-23T08:26:39,365 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:26:39,365 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithChecksum assigned. 2024-11-23T08:26:39,365 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T08:26:39,368 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-23T08:26:39,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350399368 (current time:1732350399368). 
2024-11-23T08:26:39,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:26:39,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-23T08:26:39,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:26:39,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e3e73a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:39,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:39,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:39,370 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:39,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:39,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:39,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d1f0be9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:39,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:39,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:39,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:39,372 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54672, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:39,372 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@333d83ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:39,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:39,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:39,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:39,375 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38028, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:39,376 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:26:39,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:39,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:39,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:39,376 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T08:26:39,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58ebc7d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:39,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:39,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:39,377 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:39,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:39,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:39,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2dd52a7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:39,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:39,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:39,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:39,378 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54684, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:39,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38d5368d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:39,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:39,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:39,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:39,381 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38038, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T08:26:39,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:39,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:39,384 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53122, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:39,385 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:26:39,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:39,385 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:39,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:39,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-23T08:26:39,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:26:39,387 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:26:39,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-23T08:26:39,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-11-23T08:26:39,387 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:26:39,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-11-23T08:26:39,388 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:26:39,391 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:26:39,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742282_1458 (size=161) 2024-11-23T08:26:39,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742282_1458 (size=161) 2024-11-23T08:26:39,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742282_1458 (size=161) 2024-11-23T08:26:39,399 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:26:39,399 INFO [PEWorker-3 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7f52c75517f0bb3a74bf1656270f4674}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82159aebc53b39a493b146f912661aa3}] 2024-11-23T08:26:39,400 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,400 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-11-23T08:26:39,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-11-23T08:26:39,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-11-23T08:26:39,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 2024-11-23T08:26:39,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:39,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for 7f52c75517f0bb3a74bf1656270f4674: 2024-11-23T08:26:39,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for 82159aebc53b39a493b146f912661aa3: 2024-11-23T08:26:39,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. for emptySnaptb0-testExportWithChecksum completed. 2024-11-23T08:26:39,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. for emptySnaptb0-testExportWithChecksum completed. 2024-11-23T08:26:39,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-23T08:26:39,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-23T08:26:39,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:39,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:39,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:26:39,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:26:39,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742283_1459 (size=68) 2024-11-23T08:26:39,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742283_1459 (size=68) 2024-11-23T08:26:39,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742284_1460 (size=68) 2024-11-23T08:26:39,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742284_1460 (size=68) 2024-11-23T08:26:39,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742284_1460 (size=68) 2024-11-23T08:26:39,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742283_1459 (size=68) 2024-11-23T08:26:39,566 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:39,566 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 
2024-11-23T08:26:39,566 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-11-23T08:26:39,566 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-11-23T08:26:39,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-11-23T08:26:39,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-11-23T08:26:39,567 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,567 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,567 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,567 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,569 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 82159aebc53b39a493b146f912661aa3 in 169 msec 2024-11-23T08:26:39,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=219, resume processing ppid=218 2024-11-23T08:26:39,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7f52c75517f0bb3a74bf1656270f4674 in 169 msec 2024-11-23T08:26:39,570 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:26:39,571 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:26:39,572 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:26:39,572 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-11-23T08:26:39,572 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-11-23T08:26:39,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742285_1461 (size=543) 2024-11-23T08:26:39,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742285_1461 (size=543) 2024-11-23T08:26:39,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742285_1461 (size=543) 2024-11-23T08:26:39,583 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:26:39,589 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:26:39,590 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-11-23T08:26:39,592 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:26:39,592 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-11-23T08:26:39,594 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 206 msec 2024-11-23T08:26:39,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-11-23T08:26:39,709 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T08:26:39,716 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='04a1d5e8880125649404be2700fa005fd', locateType=CURRENT is [region=testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:39,717 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched 
location of 'testtb-testExportWithChecksum', row='17e70e82ba1e9d4da71d3523cafb8a90e', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:26:39,718 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='2f22df853bf8d6e6a3f771d45d3cc582b', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:26:39,719 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='358897d22c589dd621639218a16a31f0b', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:26:39,719 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='44cc13aef04db0f61c0bcc7358fa51354', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:26:39,722 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39649 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:26:39,724 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43417 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:26:39,725 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T08:26:39,727 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-23T08:26:39,727 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 
2024-11-23T08:26:39,727 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:26:39,728 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T08:26:39,732 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T08:26:39,736 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T08:26:39,738 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-23T08:26:39,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350399738 (current time:1732350399738). 2024-11-23T08:26:39,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:26:39,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-23T08:26:39,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:26:39,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f24844, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:39,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:39,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:39,739 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:39,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:39,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:39,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b081be4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-23T08:26:39,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:39,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:39,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:39,741 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54716, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:39,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a8bb47d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:39,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:39,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:39,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:39,742 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38042, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:39,743 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 
2024-11-23T08:26:39,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:39,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:39,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:39,744 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:26:39,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4df592db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:39,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:26:39,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:26:39,745 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:26:39,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:26:39,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:26:39,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3002744f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:39,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:26:39,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:26:39,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:39,746 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54734, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:26:39,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3eef04fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:26:39,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:26:39,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:26:39,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:39,749 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38056, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:39,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:26:39,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:26:39,751 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53134, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:26:39,752 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 
2024-11-23T08:26:39,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:26:39,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:39,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:26:39,752 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:26:39,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-23T08:26:39,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
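The sequence above (snapshot request, ACL read, then "No existing snapshot, attempting snapshot...") is the master-side handling of a client snapshot call; the SnapshotProcedure registered just below (pid=221) then walks the SNAPSHOT_PREPARE ... SNAPSHOT_POST_OPERATION states. A minimal sketch of the client call that drives this, using the public Admin API (the snapshot and table names come from the log; the connection setup and class name are assumptions for the example):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // For an enabled table this defaults to a flush snapshot, matching "type=FLUSH" above.
          // The call blocks until the master reports the snapshot procedure as done.
          admin.snapshot("snaptb0-testExportWithChecksum",
              TableName.valueOf("testtb-testExportWithChecksum"));
        }
      }
    }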
2024-11-23T08:26:39,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-23T08:26:39,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-11-23T08:26:39,754 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:26:39,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-23T08:26:39,755 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:26:39,757 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:26:39,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742286_1462 (size=156) 2024-11-23T08:26:39,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742286_1462 (size=156) 2024-11-23T08:26:39,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742286_1462 (size=156) 2024-11-23T08:26:39,762 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:26:39,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7f52c75517f0bb3a74bf1656270f4674}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82159aebc53b39a493b146f912661aa3}] 2024-11-23T08:26:39,763 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,763 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-23T08:26:39,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-11-23T08:26:39,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-11-23T08:26:39,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:39,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 2024-11-23T08:26:39,915 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing 82159aebc53b39a493b146f912661aa3 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-11-23T08:26:39,915 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing 7f52c75517f0bb3a74bf1656270f4674 1/1 column families, dataSize=65 B heapSize=400 B 2024-11-23T08:26:39,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/.tmp/cf/971effed9ebc4be4afd59b3b42d99a88 is 69, key is 04a1d5e8880125649404be2700fa005fd/cf:q/1732350399722/Put/seqid=0 2024-11-23T08:26:39,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/.tmp/cf/45d0e2f692db4f7aa0d63319aaf2dd27 is 71, key is 1677bd8ef2f43e8ad44a8e0b069a554e/cf:q/1732350399724/Put/seqid=0 2024-11-23T08:26:39,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742287_1463 (size=5149) 2024-11-23T08:26:39,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742288_1464 (size=8460) 2024-11-23T08:26:39,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742288_1464 (size=8460) 2024-11-23T08:26:39,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742287_1463 (size=5149) 2024-11-23T08:26:39,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742288_1464 (size=8460) 2024-11-23T08:26:39,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to 
blk_1073742287_1463 (size=5149) 2024-11-23T08:26:39,947 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/.tmp/cf/971effed9ebc4be4afd59b3b42d99a88 2024-11-23T08:26:39,947 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/.tmp/cf/45d0e2f692db4f7aa0d63319aaf2dd27 2024-11-23T08:26:39,951 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/.tmp/cf/45d0e2f692db4f7aa0d63319aaf2dd27 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf/45d0e2f692db4f7aa0d63319aaf2dd27 2024-11-23T08:26:39,951 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/.tmp/cf/971effed9ebc4be4afd59b3b42d99a88 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/cf/971effed9ebc4be4afd59b3b42d99a88 2024-11-23T08:26:39,954 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf/45d0e2f692db4f7aa0d63319aaf2dd27, entries=49, sequenceid=6, filesize=8.3 K 2024-11-23T08:26:39,955 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/cf/971effed9ebc4be4afd59b3b42d99a88, entries=1, sequenceid=6, filesize=5.0 K 2024-11-23T08:26:39,955 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 7f52c75517f0bb3a74bf1656270f4674 in 40ms, sequenceid=6, compaction requested=false 2024-11-23T08:26:39,955 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 82159aebc53b39a493b146f912661aa3 in 40ms, sequenceid=6, compaction requested=false 2024-11-23T08:26:39,955 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-23T08:26:39,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-23T08:26:39,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for 7f52c75517f0bb3a74bf1656270f4674: 2024-11-23T08:26:39,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for 82159aebc53b39a493b146f912661aa3: 2024-11-23T08:26:39,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. for snaptb0-testExportWithChecksum completed. 2024-11-23T08:26:39,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. for snaptb0-testExportWithChecksum completed. 2024-11-23T08:26:39,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-23T08:26:39,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-23T08:26:39,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:39,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:26:39,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/cf/971effed9ebc4be4afd59b3b42d99a88] hfiles 2024-11-23T08:26:39,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf/45d0e2f692db4f7aa0d63319aaf2dd27] hfiles 2024-11-23T08:26:39,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/cf/971effed9ebc4be4afd59b3b42d99a88 for snapshot=snaptb0-testExportWithChecksum 2024-11-23T08:26:39,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf/45d0e2f692db4f7aa0d63319aaf2dd27 for snapshot=snaptb0-testExportWithChecksum 2024-11-23T08:26:39,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742289_1465 (size=107) 2024-11-23T08:26:39,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742290_1466 (size=107) 2024-11-23T08:26:39,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742290_1466 (size=107) 2024-11-23T08:26:39,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742289_1465 (size=107) 2024-11-23T08:26:39,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742290_1466 (size=107) 2024-11-23T08:26:39,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742289_1465 (size=107) 2024-11-23T08:26:39,961 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 
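Once snaptb0-testExportWithChecksum completes (a few entries below), the test exports it to a local path ("Local export destination path: ..." and the ExportSnapshot entries that follow). A rough sketch of an equivalent standalone invocation, run as a Hadoop Tool (only the snapshot name is taken from the log; the destination URI and class name are illustrative assumptions):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest and the referenced hfiles to the target file system,
        // comparing file checksums between source and target where the filesystems support it
        // (the behavior this test exercises).
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export-snaptb0"  // illustrative target URI
        });
        System.exit(rc);
      }
    }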
2024-11-23T08:26:39,961 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-11-23T08:26:39,962 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:39,962 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-11-23T08:26:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-11-23T08:26:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-11-23T08:26:39,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,962 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,962 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:39,962 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:26:39,964 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7f52c75517f0bb3a74bf1656270f4674 in 200 msec 2024-11-23T08:26:39,964 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=223, resume processing ppid=221 2024-11-23T08:26:39,964 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 82159aebc53b39a493b146f912661aa3 in 200 msec 2024-11-23T08:26:39,964 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:26:39,965 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:26:39,965 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:26:39,965 DEBUG [PEWorker-1 {}] 
snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-11-23T08:26:39,966 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T08:26:39,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742291_1467 (size=621) 2024-11-23T08:26:39,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742291_1467 (size=621) 2024-11-23T08:26:39,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742291_1467 (size=621) 2024-11-23T08:26:39,978 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:26:39,982 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:26:39,982 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-23T08:26:39,983 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:26:39,983 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-11-23T08:26:39,984 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 230 msec 2024-11-23T08:26:40,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-23T08:26:40,068 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T08:26:40,068 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350400068 2024-11-23T08:26:40,068 INFO [Time-limited test 
{}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350400068, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350400068, srcFsUri=hdfs://localhost:34105, srcDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:40,100 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34105, inputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:26:40,100 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@22fc879, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350400068, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350400068/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T08:26:40,101 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T08:26:40,104 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350400068/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T08:26:40,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:40,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:40,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:41,008 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-12309520208912179371.jar 2024-11-23T08:26:41,009 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:41,009 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:41,071 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-11277373133376013396.jar 2024-11-23T08:26:41,071 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:41,071 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:41,072 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:41,072 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:41,072 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:41,072 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:26:41,072 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T08:26:41,072 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T08:26:41,073 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T08:26:41,073 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T08:26:41,073 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T08:26:41,073 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T08:26:41,073 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T08:26:41,073 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T08:26:41,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T08:26:41,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T08:26:41,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T08:26:41,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:26:41,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:26:41,075 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:26:41,075 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:26:41,075 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:26:41,075 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:26:41,075 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:26:41,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742292_1468 (size=131440) 2024-11-23T08:26:41,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742292_1468 (size=131440) 2024-11-23T08:26:41,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742292_1468 (size=131440) 2024-11-23T08:26:41,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742293_1469 (size=4188619) 2024-11-23T08:26:41,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742293_1469 (size=4188619) 2024-11-23T08:26:41,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742293_1469 (size=4188619) 2024-11-23T08:26:41,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742294_1470 (size=1323991) 2024-11-23T08:26:41,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742294_1470 (size=1323991) 2024-11-23T08:26:41,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742294_1470 (size=1323991) 2024-11-23T08:26:41,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742295_1471 (size=903733) 2024-11-23T08:26:41,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742295_1471 (size=903733) 2024-11-23T08:26:41,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742295_1471 (size=903733) 2024-11-23T08:26:41,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742296_1472 (size=8360083) 2024-11-23T08:26:41,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742296_1472 (size=8360083) 2024-11-23T08:26:41,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742296_1472 (size=8360083) 2024-11-23T08:26:41,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742297_1473 (size=1877034) 2024-11-23T08:26:41,194 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742297_1473 (size=1877034) 2024-11-23T08:26:41,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742297_1473 (size=1877034) 2024-11-23T08:26:41,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742298_1474 (size=77835) 2024-11-23T08:26:41,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742298_1474 (size=77835) 2024-11-23T08:26:41,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742298_1474 (size=77835) 2024-11-23T08:26:41,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742299_1475 (size=30949) 2024-11-23T08:26:41,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742299_1475 (size=30949) 2024-11-23T08:26:41,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742299_1475 (size=30949) 2024-11-23T08:26:41,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742300_1476 (size=1597347) 2024-11-23T08:26:41,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742300_1476 (size=1597347) 2024-11-23T08:26:41,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742300_1476 (size=1597347) 2024-11-23T08:26:41,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742301_1477 (size=4695811) 2024-11-23T08:26:41,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742301_1477 (size=4695811) 2024-11-23T08:26:41,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742301_1477 (size=4695811) 2024-11-23T08:26:41,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742302_1478 (size=232957) 2024-11-23T08:26:41,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742302_1478 (size=232957) 2024-11-23T08:26:41,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742302_1478 (size=232957) 2024-11-23T08:26:41,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742303_1479 (size=127628) 2024-11-23T08:26:41,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742303_1479 (size=127628) 2024-11-23T08:26:41,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742303_1479 (size=127628) 2024-11-23T08:26:41,250 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742304_1480 (size=20406) 2024-11-23T08:26:41,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742304_1480 (size=20406) 2024-11-23T08:26:41,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742304_1480 (size=20406) 2024-11-23T08:26:41,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742305_1481 (size=5175431) 2024-11-23T08:26:41,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742305_1481 (size=5175431) 2024-11-23T08:26:41,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742305_1481 (size=5175431) 2024-11-23T08:26:41,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742306_1482 (size=6424742) 2024-11-23T08:26:41,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742306_1482 (size=6424742) 2024-11-23T08:26:41,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742306_1482 (size=6424742) 2024-11-23T08:26:41,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742307_1483 (size=217634) 2024-11-23T08:26:41,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742307_1483 (size=217634) 2024-11-23T08:26:41,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742307_1483 (size=217634) 2024-11-23T08:26:41,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742308_1484 (size=1832290) 2024-11-23T08:26:41,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742308_1484 (size=1832290) 2024-11-23T08:26:41,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742308_1484 (size=1832290) 2024-11-23T08:26:41,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742309_1485 (size=322274) 2024-11-23T08:26:41,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742309_1485 (size=322274) 2024-11-23T08:26:41,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742309_1485 (size=322274) 2024-11-23T08:26:41,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742310_1486 (size=503880) 2024-11-23T08:26:41,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742310_1486 (size=503880) 2024-11-23T08:26:41,322 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742310_1486 (size=503880) 2024-11-23T08:26:41,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742311_1487 (size=29229) 2024-11-23T08:26:41,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742311_1487 (size=29229) 2024-11-23T08:26:41,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742311_1487 (size=29229) 2024-11-23T08:26:41,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742312_1488 (size=24096) 2024-11-23T08:26:41,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742312_1488 (size=24096) 2024-11-23T08:26:41,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742312_1488 (size=24096) 2024-11-23T08:26:41,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742313_1489 (size=111872) 2024-11-23T08:26:41,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742313_1489 (size=111872) 2024-11-23T08:26:41,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742313_1489 (size=111872) 2024-11-23T08:26:41,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742314_1490 (size=440957) 2024-11-23T08:26:41,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742314_1490 (size=440957) 2024-11-23T08:26:41,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742314_1490 (size=440957) 2024-11-23T08:26:41,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742315_1491 (size=45609) 2024-11-23T08:26:41,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742315_1491 (size=45609) 2024-11-23T08:26:41,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742315_1491 (size=45609) 2024-11-23T08:26:41,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742316_1492 (size=136454) 2024-11-23T08:26:41,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742316_1492 (size=136454) 2024-11-23T08:26:41,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742316_1492 (size=136454) 2024-11-23T08:26:41,363 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
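The TableMapReduceUtil(972) entries above show the export test resolving, for every dependency class (HBase modules, shaded thirdparty libraries, ZooKeeper, Hadoop), the jar that contains it so those jars can be shipped with the MapReduce job; the JobResourceUploader warning at the end appears because the job itself never sets a job jar. A minimal sketch of that wiring, assuming a plain client-side job (class and job names here are hypothetical):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-sketch");

    // Setting the job jar (or a class that lives in it) avoids the
    // "No job jar file set" warning seen above; tools that ship everything
    // through the distributed cache can get away without it.
    job.setJarByClass(DependencyJarSketch.class);

    // Resolves the jar for each of the job's key classes (mapper, input/output
    // formats, HBase and shaded thirdparty classes) and adds it to the job's
    // classpath -- the same kind of per-class resolution logged above.
    TableMapReduceUtil.addDependencyJars(job);
  }
}
```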
2024-11-23T08:26:41,365 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-23T08:26:41,367 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.3 K 2024-11-23T08:26:41,367 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.0 K 2024-11-23T08:26:41,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742317_1493 (size=441) 2024-11-23T08:26:41,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742317_1493 (size=441) 2024-11-23T08:26:41,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742317_1493 (size=441) 2024-11-23T08:26:41,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742318_1494 (size=21) 2024-11-23T08:26:41,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742318_1494 (size=21) 2024-11-23T08:26:41,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742318_1494 (size=21) 2024-11-23T08:26:41,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742319_1495 (size=304048) 2024-11-23T08:26:41,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742319_1495 (size=304048) 2024-11-23T08:26:41,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742319_1495 (size=304048) 2024-11-23T08:26:41,750 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:26:42,744 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T08:26:42,744 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T08:26:42,748 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0008_000001 (auth:SIMPLE) from 127.0.0.1:53164 2024-11-23T08:26:43,264 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0009_000001 (auth:SIMPLE) from 127.0.0.1:46850 2024-11-23T08:26:44,708 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
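The ExportSnapshot entries above load the 'snaptb0-testExportWithChecksum' hfile list and plan two export splits (8.3 K and 5.0 K) before the copy job is handed to the MiniMRCluster. A hedged sketch of driving the same tool programmatically; the option names follow the HBase reference guide, and the target path is a placeholder standing in for the local-export-1732350400068 directory reported above:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Roughly equivalent to:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportWithChecksum -copy-to <target URI>
    int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
        new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export"   // placeholder target directory
        });
    System.exit(rc);
  }
}
```

The checksum comparison this particular test exercises is part of the copy step; as far as the documented options go, the tool also exposes a no-checksum-verify switch for skipping it.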
2024-11-23T08:26:46,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-23T08:26:46,247 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-11-23T08:26:46,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-23T08:26:46,914 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=919.14 KB, freeSize=879.10 MB, max=880 MB, blockCount=3, accesses=5, hits=2, hitRatio=40.00%, , cachingAccesses=5, cachingHits=2, cachingHitsRatio=40.00%, evictions=29, evicted=0, evictedPerRun=0.0 2024-11-23T08:26:47,024 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-23T08:26:47,079 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-23T08:26:47,209 DEBUG [master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-11-23T08:26:47,209 DEBUG [master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-23T08:26:47,863 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0008/container_1732350113698_0008_01_000001/launch_container.sh] 2024-11-23T08:26:47,864 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0008/container_1732350113698_0008_01_000001/container_tokens] 2024-11-23T08:26:47,864 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0008/container_1732350113698_0008_01_000001/sysfs] 2024-11-23T08:26:48,143 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for 
appattempt_1732350113698_0009_000001 (auth:SIMPLE) from 127.0.0.1:50534 2024-11-23T08:26:48,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742320_1496 (size=349746) 2024-11-23T08:26:48,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742320_1496 (size=349746) 2024-11-23T08:26:48,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742320_1496 (size=349746) 2024-11-23T08:26:49,150 INFO [regionserver/b33d9013c60a:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-23T08:26:49,158 INFO [regionserver/b33d9013c60a:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-23T08:26:49,159 INFO [regionserver/b33d9013c60a:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-23T08:26:50,175 INFO [regionserver/b33d9013c60a:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 15547 ms 2024-11-23T08:26:50,650 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0009_000001 (auth:SIMPLE) from 127.0.0.1:53172 2024-11-23T08:26:50,666 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0009_000001 (auth:SIMPLE) from 127.0.0.1:46862 2024-11-23T08:26:51,751 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:26:56,365 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 31a9b15322d53a7b44d2ddb2b2ab3b79 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:26:56,365 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 7f52c75517f0bb3a74bf1656270f4674 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:26:56,365 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 82159aebc53b39a493b146f912661aa3 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:26:56,365 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region d68b63a66937da30826ba80d223788e3 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:26:56,368 DEBUG [master/b33d9013c60a:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-11-23T08:26:56,369 INFO [master/b33d9013c60a:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-11-23T08:26:56,369 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 
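The LruBlockCache and MemStoreChunkPool statistics above are plain ratios of the counters they print; a quick arithmetic check against the logged values (nothing here is HBase-specific):

```java
public class CacheStatsCheck {
  public static void main(String[] args) {
    // LruBlockCache line at 08:26:46,914: accesses=5, hits=2 -> hitRatio=40.00%
    long accesses = 5, hits = 2;
    System.out.printf("hitRatio=%.2f%%%n", 100.0 * hits / accesses);            // 40.00%

    // MemStoreChunkPool line at 08:26:47,209: created chunk count=10,
    // reused chunk count=22 -> reuseRatio = reused / (created + reused)
    long created = 10, reused = 22;
    System.out.printf("reuseRatio=%.2f%%%n", 100.0 * reused / (created + reused)); // 68.75%
  }
}
```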
2024-11-23T08:26:56,370 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:26:56,385 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 3 regions 2024-11-23T08:26:56,385 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 2 regions 2024-11-23T08:26:56,385 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 1 regions 2024-11-23T08:26:56,385 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:26:56,385 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:26:56,385 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:26:56,385 INFO [master/b33d9013c60a:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:26:56,385 INFO [master/b33d9013c60a:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:26:56,385 INFO [master/b33d9013c60a:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:26:56,385 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-11-23T08:26:56,388 INFO [master/b33d9013c60a:0.Chore.1 {}] balancer.StochasticLoadBalancer(403): Cluster wide - Calculating plan. may take up to 30000ms to complete. 2024-11-23T08:26:56,389 INFO [master/b33d9013c60a:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.25187623876718995, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.2886751345948129, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8526829262536787, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8575790006018934, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=14400 2024-11-23T08:26:56,765 INFO [master/b33d9013c60a:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. Computation took 379 ms to try 14400 different iterations. Found a solution that moves 2 regions; Going from a computed imbalance of 0.25187623876718995 to a new imbalance of 0.017931866503478076. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.3333333333333333, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8526829262536787, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8575790006018934, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-23T08:26:56,768 INFO [master/b33d9013c60a:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 2, the balance interval is 150000 ms, and the max number regions in transition is 6 2024-11-23T08:26:56,769 INFO [master/b33d9013c60a:0.Chore.1 {}] master.HMaster(2172): balance hri=31a9b15322d53a7b44d2ddb2b2ab3b79, source=b33d9013c60a,39649,1732350107049, destination=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:56,770 DEBUG [master/b33d9013c60a:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=31a9b15322d53a7b44d2ddb2b2ab3b79, REOPEN/MOVE 2024-11-23T08:26:56,770 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=31a9b15322d53a7b44d2ddb2b2ab3b79, REOPEN/MOVE 2024-11-23T08:26:56,775 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=31a9b15322d53a7b44d2ddb2b2ab3b79, regionState=CLOSING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:26:56,779 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=31a9b15322d53a7b44d2ddb2b2ab3b79, REOPEN/MOVE because future has completed 2024-11-23T08:26:56,779 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:26:56,779 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure 31a9b15322d53a7b44d2ddb2b2ab3b79, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:26:56,933 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(122): Close 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:56,933 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:26:56,934 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1722): Closing 31a9b15322d53a7b44d2ddb2b2ab3b79, disabling compactions & flushes 2024-11-23T08:26:56,934 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] 
regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:56,934 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:56,934 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. after waiting 0 ms 2024-11-23T08:26:56,934 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:56,982 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T08:26:56,983 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:26:56,983 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:56,983 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1676): Region close journal for 31a9b15322d53a7b44d2ddb2b2ab3b79: Waiting for close lock at 1732350416933Running coprocessor pre-close hooks at 1732350416933Disabling compacts and flushes for region at 1732350416933Disabling writes for close at 1732350416934 (+1 ms)Writing region close event to WAL at 1732350416969 (+35 ms)Running coprocessor post-close hooks at 1732350416983 (+14 ms)Closed at 1732350416983 2024-11-23T08:26:56,984 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegionServer(3302): Adding 31a9b15322d53a7b44d2ddb2b2ab3b79 move to b33d9013c60a,43417,1732350106962 record at close sequenceid=5 2024-11-23T08:26:56,993 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(157): Closed 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:56,994 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=31a9b15322d53a7b44d2ddb2b2ab3b79, regionState=CLOSED 2024-11-23T08:26:56,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure 31a9b15322d53a7b44d2ddb2b2ab3b79, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:26:57,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-11-23T08:26:57,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseRegionProcedure 31a9b15322d53a7b44d2ddb2b2ab3b79, 
server=b33d9013c60a,39649,1732350107049 in 220 msec 2024-11-23T08:26:57,004 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=31a9b15322d53a7b44d2ddb2b2ab3b79, REOPEN/MOVE; state=CLOSED, location=b33d9013c60a,43417,1732350106962; forceNewPlan=false, retain=false 2024-11-23T08:26:57,155 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-23T08:26:57,155 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=31a9b15322d53a7b44d2ddb2b2ab3b79, regionState=OPENING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:57,159 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=31a9b15322d53a7b44d2ddb2b2ab3b79, REOPEN/MOVE because future has completed 2024-11-23T08:26:57,159 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31a9b15322d53a7b44d2ddb2b2ab3b79, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:26:57,201 INFO [regionserver/b33d9013c60a:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. because 5c2409a8a8e1fdedb01950f8285b9431/l has an old edit so flush to free WALs after random delay 161841 ms 2024-11-23T08:26:57,318 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:57,318 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7752): Opening region: {ENCODED => 31a9b15322d53a7b44d2ddb2b2ab3b79, NAME => 'testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T08:26:57,318 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. service=AccessControlService 2024-11-23T08:26:57,319 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
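The StochasticLoadBalancer figures above reconcile if the reported "weighted average imbalance" is read as the multiplier-weighted mean of the per-cost-function imbalances it lists, with the functions marked "not needed" left out; that reading of the formula is an assumption, but both the initial and the post-plan values reproduce from the logged numbers:

```java
public class BalancerImbalanceCheck {
  // multiplier-weighted mean of per-cost-function imbalances
  static double weightedMean(double[][] cost) {
    double num = 0, den = 0;
    for (double[] c : cost) { num += c[0] * c[1]; den += c[0]; }
    return num / den;
  }

  public static void main(String[] args) {
    // {multiplier, imbalance} pairs from the 08:26:56,389 entry
    double[][] before = {
        {500, 0.2886751345948129}, {7, 0}, {25, 0}, {15, 0}, {35, 0},
        {5, 0.8526829262536787}, {5, 0}, {5, 0.8575790006018934}, {5, 0}, {5, 0}};
    // {multiplier, imbalance} pairs from the 08:26:56,765 entry, after the 2-region plan
    double[][] after = {
        {500, 0}, {7, 0.3333333333333333}, {25, 0}, {15, 0}, {35, 0},
        {5, 0.8526829262536787}, {5, 0}, {5, 0.8575790006018934}, {5, 0}, {5, 0}};
    System.out.println(weightedMean(before)); // ~0.25187623876718995, as logged
    System.out.println(weightedMean(after));  // ~0.017931866503478076, as logged
  }
}
```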
2024-11-23T08:26:57,319 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:57,319 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:57,319 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7794): checking encryption for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:57,319 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7797): checking classloading for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:57,324 INFO [StoreOpener-31a9b15322d53a7b44d2ddb2b2ab3b79-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:57,326 INFO [StoreOpener-31a9b15322d53a7b44d2ddb2b2ab3b79-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 31a9b15322d53a7b44d2ddb2b2ab3b79 columnFamilyName cf 2024-11-23T08:26:57,326 DEBUG [StoreOpener-31a9b15322d53a7b44d2ddb2b2ab3b79-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:57,353 DEBUG [StoreOpener-31a9b15322d53a7b44d2ddb2b2ab3b79-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79/cf/49825aae06544bbebb0c64a7bd3ab9f5 2024-11-23T08:26:57,353 INFO [StoreOpener-31a9b15322d53a7b44d2ddb2b2ab3b79-1 {}] regionserver.HStore(327): Store=31a9b15322d53a7b44d2ddb2b2ab3b79/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:26:57,354 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1038): replaying wal for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:57,355 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:57,358 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:57,359 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1048): stopping wal replay for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:57,359 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1060): Cleaning up temporary data for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:57,371 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1093): writing seq id for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:57,372 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1114): Opened 31a9b15322d53a7b44d2ddb2b2ab3b79; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74295255, jitterRate=0.10708557069301605}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:26:57,372 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:26:57,372 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1006): Region open journal for 31a9b15322d53a7b44d2ddb2b2ab3b79: Running coprocessor pre-open hook at 1732350417319Writing region info on filesystem at 1732350417319Initializing all the Stores at 1732350417321 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350417321Cleaning up temporary data from old regions at 1732350417359 (+38 ms)Running coprocessor post-open hooks at 1732350417372 (+13 ms)Region opened successfully at 1732350417372 2024-11-23T08:26:57,373 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79., pid=226, masterSystemTime=1732350417313 2024-11-23T08:26:57,376 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:26:57,376 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 
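The REOPEN/MOVE above (CloseRegionProcedure on b33d9013c60a,39649,1732350107049 followed by OpenRegionProcedure on b33d9013c60a,43417,1732350106962) is the balancer acting on its own plan; the same transition can be requested from a client. A sketch against the public Admin API, assuming the ServerName-taking overload available in recent HBase releases; the encoded region name and destination server are copied from this log and would normally be looked up at run time:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class MoveRegionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the master to move the encoded region to a specific server; behind the
      // scenes this runs the same close-then-open TransitRegionStateProcedure
      // sequence recorded in the log above.
      admin.move(Bytes.toBytes("31a9b15322d53a7b44d2ddb2b2ab3b79"),
                 ServerName.valueOf("b33d9013c60a,43417,1732350106962"));
    }
  }
}
```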
2024-11-23T08:26:57,377 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=31a9b15322d53a7b44d2ddb2b2ab3b79, regionState=OPEN, openSeqNum=9, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:57,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31a9b15322d53a7b44d2ddb2b2ab3b79, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:26:57,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=224 2024-11-23T08:26:57,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=224, state=SUCCESS, hasLock=false; OpenRegionProcedure 31a9b15322d53a7b44d2ddb2b2ab3b79, server=b33d9013c60a,43417,1732350106962 in 222 msec 2024-11-23T08:26:57,385 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=31a9b15322d53a7b44d2ddb2b2ab3b79, REOPEN/MOVE in 615 msec 2024-11-23T08:26:57,473 INFO [master/b33d9013c60a:0.Chore.1 {}] master.HMaster(2172): balance hri=82159aebc53b39a493b146f912661aa3, source=b33d9013c60a,43417,1732350106962, destination=b33d9013c60a,45199,1732350106787 2024-11-23T08:26:57,474 DEBUG [master/b33d9013c60a:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, REOPEN/MOVE 2024-11-23T08:26:57,474 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, REOPEN/MOVE 2024-11-23T08:26:57,476 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=82159aebc53b39a493b146f912661aa3, regionState=CLOSING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:26:57,478 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, REOPEN/MOVE because future has completed 2024-11-23T08:26:57,478 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:26:57,478 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure 82159aebc53b39a493b146f912661aa3, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:26:57,631 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(122): Close 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:57,631 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:26:57,632 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1722): Closing 
82159aebc53b39a493b146f912661aa3, disabling compactions & flushes 2024-11-23T08:26:57,632 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:57,632 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:57,632 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. after waiting 0 ms 2024-11-23T08:26:57,632 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:57,657 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:26:57,658 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:26:57,658 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 
2024-11-23T08:26:57,658 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1676): Region close journal for 82159aebc53b39a493b146f912661aa3: Waiting for close lock at 1732350417632Running coprocessor pre-close hooks at 1732350417632Disabling compacts and flushes for region at 1732350417632Disabling writes for close at 1732350417632Writing region close event to WAL at 1732350417643 (+11 ms)Running coprocessor post-close hooks at 1732350417658 (+15 ms)Closed at 1732350417658 2024-11-23T08:26:57,658 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegionServer(3302): Adding 82159aebc53b39a493b146f912661aa3 move to b33d9013c60a,45199,1732350106787 record at close sequenceid=6 2024-11-23T08:26:57,666 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(157): Closed 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:57,667 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=82159aebc53b39a493b146f912661aa3, regionState=CLOSED 2024-11-23T08:26:57,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure 82159aebc53b39a493b146f912661aa3, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:26:57,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-11-23T08:26:57,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseRegionProcedure 82159aebc53b39a493b146f912661aa3, server=b33d9013c60a,43417,1732350106962 in 193 msec 2024-11-23T08:26:57,675 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, REOPEN/MOVE; state=CLOSED, location=b33d9013c60a,45199,1732350106787; forceNewPlan=false, retain=false 2024-11-23T08:26:57,826 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-23T08:26:57,826 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=82159aebc53b39a493b146f912661aa3, regionState=OPENING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:26:57,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, REOPEN/MOVE because future has completed 2024-11-23T08:26:57,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=227, state=RUNNABLE, hasLock=false; OpenRegionProcedure 82159aebc53b39a493b146f912661aa3, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:26:58,005 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 
2024-11-23T08:26:58,005 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(7752): Opening region: {ENCODED => 82159aebc53b39a493b146f912661aa3, NAME => 'testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T08:26:58,005 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. service=AccessControlService 2024-11-23T08:26:58,005 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:26:58,005 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:58,005 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:26:58,006 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(7794): checking encryption for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:58,006 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(7797): checking classloading for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:58,031 INFO [StoreOpener-82159aebc53b39a493b146f912661aa3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:58,033 INFO [StoreOpener-82159aebc53b39a493b146f912661aa3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 82159aebc53b39a493b146f912661aa3 columnFamilyName cf 2024-11-23T08:26:58,033 DEBUG [StoreOpener-82159aebc53b39a493b146f912661aa3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:26:58,084 DEBUG [StoreOpener-82159aebc53b39a493b146f912661aa3-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf/45d0e2f692db4f7aa0d63319aaf2dd27 2024-11-23T08:26:58,084 INFO [StoreOpener-82159aebc53b39a493b146f912661aa3-1 {}] regionserver.HStore(327): Store=82159aebc53b39a493b146f912661aa3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:26:58,084 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1038): replaying wal for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:58,086 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:58,088 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:58,090 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1048): stopping wal replay for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:58,090 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1060): Cleaning up temporary data for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:58,093 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1093): writing seq id for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:58,095 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1114): Opened 82159aebc53b39a493b146f912661aa3; next sequenceid=10; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58901308, jitterRate=-0.1223021149635315}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:26:58,095 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 82159aebc53b39a493b146f912661aa3 2024-11-23T08:26:58,095 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1006): Region open journal for 82159aebc53b39a493b146f912661aa3: Running coprocessor pre-open hook at 1732350418006Writing region info on filesystem at 1732350418006Initializing all the Stores at 1732350418007 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350418007Cleaning up temporary data from old regions at 1732350418090 (+83 ms)Running coprocessor post-open hooks at 1732350418095 (+5 ms)Region opened successfully at 1732350418095 2024-11-23T08:26:58,096 INFO 
[RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3., pid=229, masterSystemTime=1732350417988 2024-11-23T08:26:58,098 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:58,098 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:26:58,099 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=82159aebc53b39a493b146f912661aa3, regionState=OPEN, openSeqNum=10, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:26:58,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=227, state=RUNNABLE, hasLock=false; OpenRegionProcedure 82159aebc53b39a493b146f912661aa3, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:26:58,108 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=227 2024-11-23T08:26:58,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=227, state=SUCCESS, hasLock=false; OpenRegionProcedure 82159aebc53b39a493b146f912661aa3, server=b33d9013c60a,45199,1732350106787 in 274 msec 2024-11-23T08:26:58,111 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, REOPEN/MOVE in 636 msec 2024-11-23T08:26:58,175 DEBUG [master/b33d9013c60a:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-11-23T08:26:58,182 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-11-23T08:26:58,182 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportWithChecksum because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-11-23T08:26:58,189 DEBUG [master/b33d9013c60a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T08:26:58,307 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_0/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000002/launch_container.sh] 2024-11-23T08:26:58,308 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_0/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000002/container_tokens] 
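Annotation: procedures pid=227/228/229 above are one balancer-initiated region move: close on b33d9013c60a,43417, hbase:meta update, then open on b33d9013c60a,45199 (REOPEN/MOVE). The same transition can be requested through the public client API; the sketch below is illustrative rather than what the balancer chore executes, and only the encoded region name and destination server are taken from the log.

    // Minimal sketch of requesting the region move seen above via the public Admin API.
    // Connection details are illustrative; the master still drives CloseRegionProcedure
    // and OpenRegionProcedure internally, as in the records above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MoveRegionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Encoded region name and destination taken from the balance record above.
                byte[] encodedRegion = Bytes.toBytes("82159aebc53b39a493b146f912661aa3");
                ServerName dest = ServerName.valueOf("b33d9013c60a", 45199, 1732350106787L);
                admin.move(encodedRegion, dest);
            }
        }
    }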
2024-11-23T08:26:58,308 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_0/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf/45d0e2f692db4f7aa0d63319aaf2dd27 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350400068/archive/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf/45d0e2f692db4f7aa0d63319aaf2dd27. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T08:26:59,277 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0009_000001 (auth:SIMPLE) from 127.0.0.1:34278 2024-11-23T08:27:00,547 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d68b63a66937da30826ba80d223788e3, had cached 0 bytes from a total of 8392 2024-11-23T08:27:01,206 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000003/launch_container.sh] 2024-11-23T08:27:01,206 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000003/container_tokens] 2024-11-23T08:27:01,206 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/cf/971effed9ebc4be4afd59b3b42d99a88 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350400068/archive/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/cf/971effed9ebc4be4afd59b3b42d99a88. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T08:27:02,296 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0009_000001 (auth:SIMPLE) from 127.0.0.1:48454 2024-11-23T08:27:02,536 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:27:02,620 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-23T08:27:05,723 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
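Annotation: the repeated IOException above is the export mappers failing verifyCopyResult while copying from an hdfs:// source to a file: destination, whose checksum types differ; the message itself names the two workarounds (COMPOSITE_CRC file-level checksums, or skipping verification). Below is a minimal sketch of the first option, setting dfs.checksum.combine.mode on the configuration before running ExportSnapshot through ToolRunner, as the test's own stack trace shows. The snapshot name appears later in this log; the destination path is a placeholder and the single-dash flag spelling follows ExportSnapshot's documented usage, both assumptions rather than the test's exact arguments.

    // Hedged sketch: re-run the export with file-level composite-CRC checksums, as suggested
    // by the IOException text above. Setting dfs.checksum.combine.mode on the Configuration
    // mirrors the -D generic option named in the message; the destination is a placeholder.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportWithCompositeCrc {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC"); // file-level checksum mode
            int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
                "-snapshot", "snaptb0-testExportWithChecksum",  // snapshot named later in this log
                "-copy-to", "file:///tmp/local-export"          // illustrative local destination
            });
            System.exit(rc);
        }
    }

The alternative named in the message, -no-checksum-verify, disables the comparison entirely, at the cost noted there of masking corruption introduced during the copy.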
2024-11-23T08:27:05,723 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=80.41 KB heapSize=127.20 KB 2024-11-23T08:27:05,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/info/07a916eb7d13450a88d9f1f5db9c1b5d is 181, key is testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3./info:regioninfo/1732350418099/Put/seqid=0 2024-11-23T08:27:05,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742322_1498 (size=18857) 2024-11-23T08:27:05,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742322_1498 (size=18857) 2024-11-23T08:27:05,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742322_1498 (size=18857) 2024-11-23T08:27:05,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=69.25 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/info/07a916eb7d13450a88d9f1f5db9c1b5d 2024-11-23T08:27:05,911 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/ns/e2a1993d66f64ba5a9608c6324fe73ed is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a./ns:/1732350373123/DeleteFamily/seqid=0 2024-11-23T08:27:05,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742323_1499 (size=7924) 2024-11-23T08:27:05,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742323_1499 (size=7924) 2024-11-23T08:27:05,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742323_1499 (size=7924) 2024-11-23T08:27:05,937 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.37 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/ns/e2a1993d66f64ba5a9608c6324fe73ed 2024-11-23T08:27:05,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/rep_barrier/4b2cc5783a8e4531ae02248bbdc08e8c is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a./rep_barrier:/1732350373123/DeleteFamily/seqid=0 2024-11-23T08:27:06,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742324_1500 (size=8195) 2024-11-23T08:27:06,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742324_1500 (size=8195) 2024-11-23T08:27:06,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42473 is added to blk_1073742324_1500 (size=8195) 2024-11-23T08:27:06,031 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.49 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/rep_barrier/4b2cc5783a8e4531ae02248bbdc08e8c 2024-11-23T08:27:06,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/table/380cb27b9a9e479a831431ebf7f23652 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732350353004.53c5e9809ad23e100af624566823930a./table:/1732350373123/DeleteFamily/seqid=0 2024-11-23T08:27:06,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742325_1501 (size=9051) 2024-11-23T08:27:06,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742325_1501 (size=9051) 2024-11-23T08:27:06,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742325_1501 (size=9051) 2024-11-23T08:27:06,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/table/380cb27b9a9e479a831431ebf7f23652 2024-11-23T08:27:06,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/info/07a916eb7d13450a88d9f1f5db9c1b5d as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/info/07a916eb7d13450a88d9f1f5db9c1b5d 2024-11-23T08:27:06,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/info/07a916eb7d13450a88d9f1f5db9c1b5d, entries=106, sequenceid=219, filesize=18.4 K 2024-11-23T08:27:06,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/ns/e2a1993d66f64ba5a9608c6324fe73ed as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/ns/e2a1993d66f64ba5a9608c6324fe73ed 2024-11-23T08:27:06,150 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/ns/e2a1993d66f64ba5a9608c6324fe73ed, entries=24, sequenceid=219, filesize=7.7 K 2024-11-23T08:27:06,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/rep_barrier/4b2cc5783a8e4531ae02248bbdc08e8c as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/rep_barrier/4b2cc5783a8e4531ae02248bbdc08e8c 2024-11-23T08:27:06,164 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/rep_barrier/4b2cc5783a8e4531ae02248bbdc08e8c, entries=22, sequenceid=219, filesize=8.0 K 2024-11-23T08:27:06,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/table/380cb27b9a9e479a831431ebf7f23652 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/table/380cb27b9a9e479a831431ebf7f23652 2024-11-23T08:27:06,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/table/380cb27b9a9e479a831431ebf7f23652, entries=39, sequenceid=219, filesize=8.8 K 2024-11-23T08:27:06,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~80.41 KB/82344, heapSize ~127.14 KB/130192, currentSize=0 B/0 for 1588230740 in 450ms, sequenceid=219, compaction requested=false 2024-11-23T08:27:06,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf/45d0e2f692db4f7aa0d63319aaf2dd27 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350400068/archive/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf/45d0e2f692db4f7aa0d63319aaf2dd27. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T08:27:08,306 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0009_000001 (auth:SIMPLE) from 127.0.0.1:48458 2024-11-23T08:27:08,429 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000005/launch_container.sh] 2024-11-23T08:27:08,429 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000005/container_tokens] 2024-11-23T08:27:08,429 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000005/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/cf/971effed9ebc4be4afd59b3b42d99a88 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350400068/archive/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/cf/971effed9ebc4be4afd59b3b42d99a88. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T08:27:10,332 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0009_000001 (auth:SIMPLE) from 127.0.0.1:48470 2024-11-23T08:27:11,778 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000004/launch_container.sh] 2024-11-23T08:27:11,778 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000004/container_tokens] 2024-11-23T08:27:11,778 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000004/sysfs] 2024-11-23T08:27:14,708 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T08:27:17,510 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000006/launch_container.sh] 2024-11-23T08:27:17,510 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000006/container_tokens] 2024-11-23T08:27:17,510 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000006/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf/45d0e2f692db4f7aa0d63319aaf2dd27 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350400068/archive/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf/45d0e2f692db4f7aa0d63319aaf2dd27. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T08:27:17,961 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000007/launch_container.sh] 2024-11-23T08:27:17,961 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000007/container_tokens] 2024-11-23T08:27:17,961 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000007/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/cf/971effed9ebc4be4afd59b3b42d99a88 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/local-export-1732350400068/archive/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/cf/971effed9ebc4be4afd59b3b42d99a88. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T08:27:19,345 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0009_000001 (auth:SIMPLE) from 127.0.0.1:50948 2024-11-23T08:27:19,346 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0009_000001 (auth:SIMPLE) from 127.0.0.1:51370 2024-11-23T08:27:23,903 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0009_000001 (auth:SIMPLE) from 127.0.0.1:43046 2024-11-23T08:27:23,916 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732350113698_0009_01_000008 is : 143 2024-11-23T08:27:23,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742321_1497 (size=30387) 2024-11-23T08:27:23,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742321_1497 (size=30387) 2024-11-23T08:27:23,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742321_1497 (size=30387) 2024-11-23T08:27:23,927 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000008/launch_container.sh] 2024-11-23T08:27:23,927 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000008/container_tokens] 2024-11-23T08:27:23,927 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_2/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000008/sysfs] 2024-11-23T08:27:23,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42473 is added to blk_1073742326_1502 (size=460) 2024-11-23T08:27:23,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742326_1502 (size=460) 2024-11-23T08:27:23,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742326_1502 (size=460) 2024-11-23T08:27:23,982 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000009/launch_container.sh] 2024-11-23T08:27:23,982 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000009/container_tokens] 2024-11-23T08:27:23,982 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_1/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000009/sysfs] 2024-11-23T08:27:23,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742327_1503 (size=30387) 2024-11-23T08:27:23,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742327_1503 (size=30387) 2024-11-23T08:27:23,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742327_1503 (size=30387) 2024-11-23T08:27:24,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742328_1504 (size=349746) 2024-11-23T08:27:24,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742328_1504 (size=349746) 2024-11-23T08:27:24,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742328_1504 (size=349746) 2024-11-23T08:27:24,012 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0009_000001 (auth:SIMPLE) from 127.0.0.1:36030 2024-11-23T08:27:24,056 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7f52c75517f0bb3a74bf1656270f4674, had cached 0 bytes from a total of 5149 2024-11-23T08:27:25,732 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1732350113698_0009_m_000001 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T08:27:25,733 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350445733 2024-11-23T08:27:25,733 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34105, tgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350445733, rawTgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350445733, srcFsUri=hdfs://localhost:34105, srcDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:27:25,756 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34105, inputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:27:25,756 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350445733, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350445733/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T08:27:25,758 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T08:27:25,762 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350445733/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T08:27:25,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742329_1505 (size=156) 2024-11-23T08:27:25,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742329_1505 (size=156) 2024-11-23T08:27:25,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742329_1505 (size=156) 2024-11-23T08:27:25,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742330_1506 (size=621) 2024-11-23T08:27:25,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742330_1506 (size=621) 2024-11-23T08:27:25,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742330_1506 (size=621) 2024-11-23T08:27:25,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:25,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:25,779 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:26,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-791075987637582642.jar 2024-11-23T08:27:26,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:26,665 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:26,727 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-16457617657757252776.jar 2024-11-23T08:27:26,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:26,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:26,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:26,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:26,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:26,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:26,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T08:27:26,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T08:27:26,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T08:27:26,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T08:27:26,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T08:27:26,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T08:27:26,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T08:27:26,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T08:27:26,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T08:27:26,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T08:27:26,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T08:27:26,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-23T08:27:26,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:27:26,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:27:26,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:27:26,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:27:26,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:27:26,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:27:26,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742331_1507 (size=131440) 2024-11-23T08:27:26,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742331_1507 (size=131440) 2024-11-23T08:27:26,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742331_1507 (size=131440) 2024-11-23T08:27:26,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742332_1508 (size=4188619) 2024-11-23T08:27:26,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742332_1508 (size=4188619) 2024-11-23T08:27:26,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742332_1508 (size=4188619) 2024-11-23T08:27:26,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742333_1509 (size=1323991) 2024-11-23T08:27:26,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742333_1509 (size=1323991) 2024-11-23T08:27:26,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742333_1509 (size=1323991) 2024-11-23T08:27:26,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42473 is added to blk_1073742334_1510 (size=903733) 2024-11-23T08:27:26,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742334_1510 (size=903733) 2024-11-23T08:27:26,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742334_1510 (size=903733) 2024-11-23T08:27:26,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742335_1511 (size=8360083) 2024-11-23T08:27:26,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742335_1511 (size=8360083) 2024-11-23T08:27:26,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742335_1511 (size=8360083) 2024-11-23T08:27:26,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742336_1512 (size=440957) 2024-11-23T08:27:26,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742336_1512 (size=440957) 2024-11-23T08:27:26,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742336_1512 (size=440957) 2024-11-23T08:27:26,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742337_1513 (size=6424742) 2024-11-23T08:27:26,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742337_1513 (size=6424742) 2024-11-23T08:27:26,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742337_1513 (size=6424742) 2024-11-23T08:27:26,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742338_1514 (size=1877034) 2024-11-23T08:27:26,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742338_1514 (size=1877034) 2024-11-23T08:27:26,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742338_1514 (size=1877034) 2024-11-23T08:27:26,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742339_1515 (size=77835) 2024-11-23T08:27:26,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742339_1515 (size=77835) 2024-11-23T08:27:26,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742339_1515 (size=77835) 2024-11-23T08:27:26,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742340_1516 (size=30949) 2024-11-23T08:27:26,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742340_1516 (size=30949) 2024-11-23T08:27:26,875 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742340_1516 (size=30949) 2024-11-23T08:27:26,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742341_1517 (size=1597347) 2024-11-23T08:27:26,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742341_1517 (size=1597347) 2024-11-23T08:27:26,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742341_1517 (size=1597347) 2024-11-23T08:27:26,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742342_1518 (size=4695811) 2024-11-23T08:27:26,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742342_1518 (size=4695811) 2024-11-23T08:27:26,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742342_1518 (size=4695811) 2024-11-23T08:27:26,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742343_1519 (size=232957) 2024-11-23T08:27:26,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742343_1519 (size=232957) 2024-11-23T08:27:26,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742343_1519 (size=232957) 2024-11-23T08:27:26,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742344_1520 (size=127628) 2024-11-23T08:27:26,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742344_1520 (size=127628) 2024-11-23T08:27:26,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742344_1520 (size=127628) 2024-11-23T08:27:26,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742345_1521 (size=20406) 2024-11-23T08:27:26,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742345_1521 (size=20406) 2024-11-23T08:27:26,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742345_1521 (size=20406) 2024-11-23T08:27:26,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742346_1522 (size=5175431) 2024-11-23T08:27:26,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742346_1522 (size=5175431) 2024-11-23T08:27:26,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742346_1522 (size=5175431) 2024-11-23T08:27:26,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742347_1523 (size=217634) 2024-11-23T08:27:26,944 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742347_1523 (size=217634) 2024-11-23T08:27:26,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742347_1523 (size=217634) 2024-11-23T08:27:26,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742348_1524 (size=1832290) 2024-11-23T08:27:26,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742348_1524 (size=1832290) 2024-11-23T08:27:26,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742348_1524 (size=1832290) 2024-11-23T08:27:26,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742349_1525 (size=322274) 2024-11-23T08:27:26,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742349_1525 (size=322274) 2024-11-23T08:27:26,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742349_1525 (size=322274) 2024-11-23T08:27:26,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742350_1526 (size=503880) 2024-11-23T08:27:26,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742350_1526 (size=503880) 2024-11-23T08:27:26,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742350_1526 (size=503880) 2024-11-23T08:27:26,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742351_1527 (size=29229) 2024-11-23T08:27:26,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742351_1527 (size=29229) 2024-11-23T08:27:26,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742351_1527 (size=29229) 2024-11-23T08:27:26,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742352_1528 (size=24096) 2024-11-23T08:27:26,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742352_1528 (size=24096) 2024-11-23T08:27:26,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742352_1528 (size=24096) 2024-11-23T08:27:26,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742353_1529 (size=111872) 2024-11-23T08:27:26,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742353_1529 (size=111872) 2024-11-23T08:27:26,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742353_1529 (size=111872) 2024-11-23T08:27:26,997 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742354_1530 (size=45609) 2024-11-23T08:27:26,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742354_1530 (size=45609) 2024-11-23T08:27:26,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742354_1530 (size=45609) 2024-11-23T08:27:27,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742355_1531 (size=136454) 2024-11-23T08:27:27,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742355_1531 (size=136454) 2024-11-23T08:27:27,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742355_1531 (size=136454) 2024-11-23T08:27:27,007 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T08:27:27,010 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-23T08:27:27,011 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.3 K 2024-11-23T08:27:27,011 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.0 K 2024-11-23T08:27:27,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742356_1532 (size=441) 2024-11-23T08:27:27,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742356_1532 (size=441) 2024-11-23T08:27:27,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742356_1532 (size=441) 2024-11-23T08:27:27,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742357_1533 (size=21) 2024-11-23T08:27:27,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742357_1533 (size=21) 2024-11-23T08:27:27,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742357_1533 (size=21) 2024-11-23T08:27:27,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742358_1534 (size=303996) 2024-11-23T08:27:27,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742358_1534 (size=303996) 2024-11-23T08:27:27,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742358_1534 (size=303996) 2024-11-23T08:27:30,081 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T08:27:30,081 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T08:27:30,085 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0009_000001 (auth:SIMPLE) from 127.0.0.1:36036 2024-11-23T08:27:30,099 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_2/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000001/launch_container.sh] 2024-11-23T08:27:30,099 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_2/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000001/container_tokens] 2024-11-23T08:27:30,099 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_2/usercache/jenkins/appcache/application_1732350113698_0009/container_1732350113698_0009_01_000001/sysfs] 2024-11-23T08:27:30,925 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0010_000001 (auth:SIMPLE) from 127.0.0.1:43056 2024-11-23T08:27:36,261 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0010_000001 (auth:SIMPLE) from 127.0.0.1:50706 2024-11-23T08:27:36,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742359_1535 (size=349694) 2024-11-23T08:27:36,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742359_1535 (size=349694) 2024-11-23T08:27:36,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742359_1535 (size=349694) 2024-11-23T08:27:38,837 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0010_000001 (auth:SIMPLE) from 127.0.0.1:55608 2024-11-23T08:27:38,858 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0010_000001 (auth:SIMPLE) from 127.0.0.1:60064 2024-11-23T08:27:42,319 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 31a9b15322d53a7b44d2ddb2b2ab3b79, had cached 0 bytes from a total of 5216 2024-11-23T08:27:43,006 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 82159aebc53b39a493b146f912661aa3, had cached 0 bytes from a total of 8460 2024-11-23T08:27:44,184 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742360_1536 (size=8460) 2024-11-23T08:27:44,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742360_1536 (size=8460) 2024-11-23T08:27:44,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742360_1536 (size=8460) 2024-11-23T08:27:44,398 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_0/usercache/jenkins/appcache/application_1732350113698_0010/container_1732350113698_0010_01_000002/launch_container.sh] 2024-11-23T08:27:44,398 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_0/usercache/jenkins/appcache/application_1732350113698_0010/container_1732350113698_0010_01_000002/container_tokens] 2024-11-23T08:27:44,398 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_0/usercache/jenkins/appcache/application_1732350113698_0010/container_1732350113698_0010_01_000002/sysfs] 2024-11-23T08:27:44,708 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T08:27:45,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742362_1538 (size=5149) 2024-11-23T08:27:45,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742362_1538 (size=5149) 2024-11-23T08:27:45,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742362_1538 (size=5149) 2024-11-23T08:27:45,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742361_1537 (size=22153) 2024-11-23T08:27:45,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742361_1537 (size=22153) 2024-11-23T08:27:45,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742361_1537 (size=22153) 2024-11-23T08:27:45,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742363_1539 (size=463) 2024-11-23T08:27:45,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742363_1539 (size=463) 2024-11-23T08:27:45,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742363_1539 (size=463) 2024-11-23T08:27:45,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742364_1540 (size=22153) 2024-11-23T08:27:45,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742364_1540 (size=22153) 2024-11-23T08:27:45,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742364_1540 (size=22153) 2024-11-23T08:27:45,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742365_1541 (size=349694) 2024-11-23T08:27:45,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742365_1541 (size=349694) 2024-11-23T08:27:45,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742365_1541 (size=349694) 2024-11-23T08:27:45,353 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0010_000001 (auth:SIMPLE) from 127.0.0.1:55956 2024-11-23T08:27:45,367 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0010/container_1732350113698_0010_01_000003/launch_container.sh] 2024-11-23T08:27:45,367 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0010/container_1732350113698_0010_01_000003/container_tokens] 2024-11-23T08:27:45,367 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0010/container_1732350113698_0010_01_000003/sysfs] 2024-11-23T08:27:45,547 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d68b63a66937da30826ba80d223788e3, had cached 0 bytes from a total of 8392 2024-11-23T08:27:47,333 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T08:27:47,334 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T08:27:47,338 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-11-23T08:27:47,338 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T08:27:47,338 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T08:27:47,338 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-23T08:27:47,339 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-23T08:27:47,339 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-23T08:27:47,339 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350445733/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350445733/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-23T08:27:47,339 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350445733/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-23T08:27:47,339 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350445733/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-23T08:27:47,345 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-11-23T08:27:47,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=230, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-11-23T08:27:47,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-11-23T08:27:47,348 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350467348"}]},"ts":"1732350467348"} 2024-11-23T08:27:47,350 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-11-23T08:27:47,350 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-11-23T08:27:47,351 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=231, ppid=230, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-11-23T08:27:47,352 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7f52c75517f0bb3a74bf1656270f4674, UNASSIGN}, {pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, UNASSIGN}] 2024-11-23T08:27:47,353 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, UNASSIGN 2024-11-23T08:27:47,353 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7f52c75517f0bb3a74bf1656270f4674, UNASSIGN 2024-11-23T08:27:47,354 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=82159aebc53b39a493b146f912661aa3, regionState=CLOSING, regionLocation=b33d9013c60a,45199,1732350106787 2024-11-23T08:27:47,354 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=7f52c75517f0bb3a74bf1656270f4674, regionState=CLOSING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:27:47,356 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, UNASSIGN because future has completed 2024-11-23T08:27:47,356 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:27:47,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=234, ppid=233, 
state=RUNNABLE, hasLock=false; CloseRegionProcedure 82159aebc53b39a493b146f912661aa3, server=b33d9013c60a,45199,1732350106787}] 2024-11-23T08:27:47,356 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7f52c75517f0bb3a74bf1656270f4674, UNASSIGN because future has completed 2024-11-23T08:27:47,357 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:27:47,357 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=232, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7f52c75517f0bb3a74bf1656270f4674, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:27:47,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-11-23T08:27:47,510 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(122): Close 82159aebc53b39a493b146f912661aa3 2024-11-23T08:27:47,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:27:47,510 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(122): Close 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:27:47,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:27:47,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1722): Closing 82159aebc53b39a493b146f912661aa3, disabling compactions & flushes 2024-11-23T08:27:47,510 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:27:47,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1722): Closing 7f52c75517f0bb3a74bf1656270f4674, disabling compactions & flushes 2024-11-23T08:27:47,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:27:47,510 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 2024-11-23T08:27:47,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 
after waiting 0 ms 2024-11-23T08:27:47,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:27:47,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 2024-11-23T08:27:47,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. after waiting 0 ms 2024-11-23T08:27:47,510 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 2024-11-23T08:27:47,516 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=9 2024-11-23T08:27:47,516 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:27:47,517 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:27:47,517 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:27:47,517 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3. 2024-11-23T08:27:47,517 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674. 
2024-11-23T08:27:47,517 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1676): Region close journal for 7f52c75517f0bb3a74bf1656270f4674: Waiting for close lock at 1732350467510Running coprocessor pre-close hooks at 1732350467510Disabling compacts and flushes for region at 1732350467510Disabling writes for close at 1732350467510Writing region close event to WAL at 1732350467511 (+1 ms)Running coprocessor post-close hooks at 1732350467517 (+6 ms)Closed at 1732350467517 2024-11-23T08:27:47,517 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1676): Region close journal for 82159aebc53b39a493b146f912661aa3: Waiting for close lock at 1732350467510Running coprocessor pre-close hooks at 1732350467510Disabling compacts and flushes for region at 1732350467510Disabling writes for close at 1732350467510Writing region close event to WAL at 1732350467511 (+1 ms)Running coprocessor post-close hooks at 1732350467517 (+6 ms)Closed at 1732350467517 2024-11-23T08:27:47,518 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(157): Closed 7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:27:47,519 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=7f52c75517f0bb3a74bf1656270f4674, regionState=CLOSED 2024-11-23T08:27:47,520 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(157): Closed 82159aebc53b39a493b146f912661aa3 2024-11-23T08:27:47,520 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=82159aebc53b39a493b146f912661aa3, regionState=CLOSED 2024-11-23T08:27:47,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=232, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7f52c75517f0bb3a74bf1656270f4674, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:27:47,521 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=234, ppid=233, state=RUNNABLE, hasLock=false; CloseRegionProcedure 82159aebc53b39a493b146f912661aa3, server=b33d9013c60a,45199,1732350106787 because future has completed 2024-11-23T08:27:47,523 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=232 2024-11-23T08:27:47,523 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=232, state=SUCCESS, hasLock=false; CloseRegionProcedure 7f52c75517f0bb3a74bf1656270f4674, server=b33d9013c60a,39649,1732350107049 in 164 msec 2024-11-23T08:27:47,523 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=234, resume processing ppid=233 2024-11-23T08:27:47,523 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=7f52c75517f0bb3a74bf1656270f4674, UNASSIGN in 171 msec 2024-11-23T08:27:47,524 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, ppid=233, state=SUCCESS, hasLock=false; CloseRegionProcedure 82159aebc53b39a493b146f912661aa3, server=b33d9013c60a,45199,1732350106787 in 166 msec 2024-11-23T08:27:47,525 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=233, resume processing ppid=231 2024-11-23T08:27:47,525 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=82159aebc53b39a493b146f912661aa3, UNASSIGN in 171 msec 2024-11-23T08:27:47,527 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=231, resume processing ppid=230 2024-11-23T08:27:47,527 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, ppid=230, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 174 msec 2024-11-23T08:27:47,528 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350467528"}]},"ts":"1732350467528"} 2024-11-23T08:27:47,529 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-11-23T08:27:47,529 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-11-23T08:27:47,531 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 185 msec 2024-11-23T08:27:47,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-11-23T08:27:47,667 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T08:27:47,667 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-11-23T08:27:47,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=236, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T08:27:47,669 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=236, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T08:27:47,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-11-23T08:27:47,670 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=236, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T08:27:47,672 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-11-23T08:27:47,686 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674 2024-11-23T08:27:47,687 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3 2024-11-23T08:27:47,688 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/recovered.edits] 2024-11-23T08:27:47,688 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/recovered.edits] 2024-11-23T08:27:47,692 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf/45d0e2f692db4f7aa0d63319aaf2dd27 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/cf/45d0e2f692db4f7aa0d63319aaf2dd27 2024-11-23T08:27:47,692 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/cf/971effed9ebc4be4afd59b3b42d99a88 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/cf/971effed9ebc4be4afd59b3b42d99a88 2024-11-23T08:27:47,694 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/recovered.edits/12.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3/recovered.edits/12.seqid 2024-11-23T08:27:47,694 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674/recovered.edits/9.seqid 2024-11-23T08:27:47,694 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/82159aebc53b39a493b146f912661aa3 2024-11-23T08:27:47,694 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportWithChecksum/7f52c75517f0bb3a74bf1656270f4674 
2024-11-23T08:27:47,694 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-11-23T08:27:47,696 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=236, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T08:27:47,702 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-11-23T08:27:47,704 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-11-23T08:27:47,706 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=236, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T08:27:47,706 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-11-23T08:27:47,706 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350467706"}]},"ts":"9223372036854775807"} 2024-11-23T08:27:47,706 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350467706"}]},"ts":"9223372036854775807"} 2024-11-23T08:27:47,708 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T08:27:47,708 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 7f52c75517f0bb3a74bf1656270f4674, NAME => 'testtb-testExportWithChecksum,,1732350398723.7f52c75517f0bb3a74bf1656270f4674.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 82159aebc53b39a493b146f912661aa3, NAME => 'testtb-testExportWithChecksum,1,1732350398723.82159aebc53b39a493b146f912661aa3.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T08:27:47,708 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
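The Delete records above remove the two region rows from hbase:meta; the row keys follow the pattern <table>,<startKey>,<creationTimestamp>.<encodedRegionName>. A rough way to list a table's meta rows from a client, assuming an already-open Connection named connection (illustrative only, not part of this test):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Scan hbase:meta starting at the table's first possible row key and stop
    // once the rows no longer belong to it.
    String prefix = "testtb-testExportWithChecksum,";
    Scan scan = new Scan().withStartRow(Bytes.toBytes(prefix));
    try (Table meta = connection.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(scan)) {
      for (Result r : scanner) {
        String row = Bytes.toStringBinary(r.getRow());
        if (!row.startsWith(prefix)) break;   // past this table's region rows
        System.out.println(row);
      }
    }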
2024-11-23T08:27:47,708 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732350467708"}]},"ts":"9223372036854775807"} 2024-11-23T08:27:47,709 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-11-23T08:27:47,710 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=236, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T08:27:47,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 43 msec 2024-11-23T08:27:47,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T08:27:47,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T08:27:47,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T08:27:47,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T08:27:47,735 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-23T08:27:47,735 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-23T08:27:47,735 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-23T08:27:47,735 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-23T08:27:47,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T08:27:47,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T08:27:47,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T08:27:47,743 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T08:27:47,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:27:47,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:27:47,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:27:47,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:27:47,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-11-23T08:27:47,744 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:27:47,744 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:27:47,744 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-11-23T08:27:47,744 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:27:47,744 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:27:47,744 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T08:27:47,748 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-11-23T08:27:47,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-11-23T08:27:47,751 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-11-23T08:27:47,752 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-11-23T08:27:47,771 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=818 (was 820), OpenFileDescriptor=819 (was 819), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=849 (was 617) - SystemLoadAverage LEAK? -, ProcessCount=23 (was 19) - ProcessCount LEAK? -, AvailableMemoryMB=1793 (was 2304) 2024-11-23T08:27:47,771 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=818 is superior to 500 2024-11-23T08:27:47,786 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=818, OpenFileDescriptor=819, MaxFileDescriptor=1048576, SystemLoadAverage=849, ProcessCount=23, AvailableMemoryMB=1793 2024-11-23T08:27:47,786 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=818 is superior to 500 2024-11-23T08:27:47,788 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T08:27:47,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=237, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:47,790 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T08:27:47,790 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:27:47,790 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 237 2024-11-23T08:27:47,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-11-23T08:27:47,791 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T08:27:47,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742366_1542 (size=418) 2024-11-23T08:27:47,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742366_1542 (size=418) 2024-11-23T08:27:47,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to 
blk_1073742366_1542 (size=418) 2024-11-23T08:27:47,798 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 19aecca552920d1bec6fe5848db649c4, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:27:47,798 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => faeb5f40e361a46141cc129e6a301658, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:27:47,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742367_1543 (size=79) 2024-11-23T08:27:47,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742368_1544 (size=79) 2024-11-23T08:27:47,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742367_1543 (size=79) 2024-11-23T08:27:47,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742368_1544 (size=79) 2024-11-23T08:27:47,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742368_1544 (size=79) 2024-11-23T08:27:47,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742367_1543 (size=79) 2024-11-23T08:27:47,808 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:27:47,809 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 
; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:27:47,809 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 19aecca552920d1bec6fe5848db649c4, disabling compactions & flushes 2024-11-23T08:27:47,809 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 2024-11-23T08:27:47,809 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing faeb5f40e361a46141cc129e6a301658, disabling compactions & flushes 2024-11-23T08:27:47,809 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 2024-11-23T08:27:47,809 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 2024-11-23T08:27:47,809 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. after waiting 0 ms 2024-11-23T08:27:47,809 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 2024-11-23T08:27:47,809 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 2024-11-23T08:27:47,809 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. after waiting 0 ms 2024-11-23T08:27:47,809 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 2024-11-23T08:27:47,809 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 2024-11-23T08:27:47,809 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 
2024-11-23T08:27:47,809 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 19aecca552920d1bec6fe5848db649c4: Waiting for close lock at 1732350467809Disabling compacts and flushes for region at 1732350467809Disabling writes for close at 1732350467809Writing region close event to WAL at 1732350467809Closed at 1732350467809 2024-11-23T08:27:47,809 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for faeb5f40e361a46141cc129e6a301658: Waiting for close lock at 1732350467809Disabling compacts and flushes for region at 1732350467809Disabling writes for close at 1732350467809Writing region close event to WAL at 1732350467809Closed at 1732350467809 2024-11-23T08:27:47,810 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T08:27:47,810 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732350467810"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350467810"}]},"ts":"1732350467810"} 2024-11-23T08:27:47,810 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732350467810"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732350467810"}]},"ts":"1732350467810"} 2024-11-23T08:27:47,812 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
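The CreateTableProcedure above writes the filesystem layout for two regions split at row key '1', then adds both region rows and the table-state row to hbase:meta. An equivalent client-side request, sketched with the standard descriptor builders and only the family name, VERSIONS setting, and split point shown in the log (everything else is left at defaults):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSkipTmpTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                       // VERSIONS => '1' in the log's descriptor
                  .build())
              .build();
          // Pre-split at '1' so the table starts with the two regions (''..'1', '1'..'') created above.
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }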
2024-11-23T08:27:47,812 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T08:27:47,813 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350467812"}]},"ts":"1732350467812"} 2024-11-23T08:27:47,814 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-11-23T08:27:47,814 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b33d9013c60a=0} racks are {/default-rack=0} 2024-11-23T08:27:47,816 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T08:27:47,816 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T08:27:47,816 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T08:27:47,816 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T08:27:47,816 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T08:27:47,816 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T08:27:47,816 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T08:27:47,816 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T08:27:47,816 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T08:27:47,816 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T08:27:47,816 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=faeb5f40e361a46141cc129e6a301658, ASSIGN}, {pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=19aecca552920d1bec6fe5848db649c4, ASSIGN}] 2024-11-23T08:27:47,817 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=19aecca552920d1bec6fe5848db649c4, ASSIGN 2024-11-23T08:27:47,817 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=faeb5f40e361a46141cc129e6a301658, ASSIGN 2024-11-23T08:27:47,818 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=faeb5f40e361a46141cc129e6a301658, ASSIGN; state=OFFLINE, location=b33d9013c60a,43417,1732350106962; forceNewPlan=false, retain=false 2024-11-23T08:27:47,818 INFO 
[PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=19aecca552920d1bec6fe5848db649c4, ASSIGN; state=OFFLINE, location=b33d9013c60a,39649,1732350107049; forceNewPlan=false, retain=false 2024-11-23T08:27:47,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-11-23T08:27:47,968 INFO [b33d9013c60a:38501 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-23T08:27:47,969 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=239 updating hbase:meta row=19aecca552920d1bec6fe5848db649c4, regionState=OPENING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:27:47,969 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=238 updating hbase:meta row=faeb5f40e361a46141cc129e6a301658, regionState=OPENING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:27:47,971 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=19aecca552920d1bec6fe5848db649c4, ASSIGN because future has completed 2024-11-23T08:27:47,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; OpenRegionProcedure 19aecca552920d1bec6fe5848db649c4, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:27:47,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=faeb5f40e361a46141cc129e6a301658, ASSIGN because future has completed 2024-11-23T08:27:47,972 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=241, ppid=238, state=RUNNABLE, hasLock=false; OpenRegionProcedure faeb5f40e361a46141cc129e6a301658, server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:27:48,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-11-23T08:27:48,126 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 2024-11-23T08:27:48,127 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7752): Opening region: {ENCODED => 19aecca552920d1bec6fe5848db649c4, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T08:27:48,127 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 
service=AccessControlService 2024-11-23T08:27:48,128 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T08:27:48,128 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:48,128 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:27:48,128 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7794): checking encryption for 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:48,128 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7797): checking classloading for 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:48,129 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 2024-11-23T08:27:48,129 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7752): Opening region: {ENCODED => faeb5f40e361a46141cc129e6a301658, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T08:27:48,130 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. service=AccessControlService 2024-11-23T08:27:48,130 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
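Each region open registers the AccessControlService coprocessor endpoint because the cluster runs with the AccessController coprocessor installed. The standard way to enable it is via hbase-site.xml; the same settings expressed programmatically, as a sketch rather than a copy of this test's actual configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");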
2024-11-23T08:27:48,130 INFO [StoreOpener-19aecca552920d1bec6fe5848db649c4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:48,130 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:48,130 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T08:27:48,130 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7794): checking encryption for faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:48,130 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7797): checking classloading for faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:48,132 INFO [StoreOpener-19aecca552920d1bec6fe5848db649c4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 19aecca552920d1bec6fe5848db649c4 columnFamilyName cf 2024-11-23T08:27:48,132 INFO [StoreOpener-faeb5f40e361a46141cc129e6a301658-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:48,132 DEBUG [StoreOpener-19aecca552920d1bec6fe5848db649c4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:27:48,132 INFO [StoreOpener-19aecca552920d1bec6fe5848db649c4-1 {}] regionserver.HStore(327): Store=19aecca552920d1bec6fe5848db649c4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:27:48,133 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1038): replaying wal for 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:48,133 INFO [StoreOpener-faeb5f40e361a46141cc129e6a301658-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region faeb5f40e361a46141cc129e6a301658 columnFamilyName cf 2024-11-23T08:27:48,133 DEBUG [StoreOpener-faeb5f40e361a46141cc129e6a301658-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T08:27:48,134 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:48,134 INFO [StoreOpener-faeb5f40e361a46141cc129e6a301658-1 {}] regionserver.HStore(327): Store=faeb5f40e361a46141cc129e6a301658/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T08:27:48,134 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:48,134 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1038): replaying wal for faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:48,134 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1048): stopping wal replay for 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:48,135 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1060): Cleaning up temporary data for 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:48,135 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:48,135 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:48,136 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1048): stopping wal replay for faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:48,136 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1060): Cleaning up temporary data for faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:48,136 DEBUG 
[RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1093): writing seq id for 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:48,137 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1093): writing seq id for faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:48,138 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:27:48,139 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1114): Opened 19aecca552920d1bec6fe5848db649c4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64884428, jitterRate=-0.033146679401397705}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:27:48,139 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:48,139 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T08:27:48,139 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1006): Region open journal for 19aecca552920d1bec6fe5848db649c4: Running coprocessor pre-open hook at 1732350468128Writing region info on filesystem at 1732350468128Initializing all the Stores at 1732350468129 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350468129Cleaning up temporary data from old regions at 1732350468135 (+6 ms)Running coprocessor post-open hooks at 1732350468139 (+4 ms)Region opened successfully at 1732350468139 2024-11-23T08:27:48,140 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1114): Opened faeb5f40e361a46141cc129e6a301658; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63585445, jitterRate=-0.052503034472465515}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T08:27:48,140 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1122): Running coprocessor post-open hooks for faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:48,140 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1006): Region open journal for faeb5f40e361a46141cc129e6a301658: Running coprocessor pre-open hook at 
1732350468131Writing region info on filesystem at 1732350468131Initializing all the Stores at 1732350468131Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732350468131Cleaning up temporary data from old regions at 1732350468136 (+5 ms)Running coprocessor post-open hooks at 1732350468140 (+4 ms)Region opened successfully at 1732350468140 2024-11-23T08:27:48,140 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4., pid=240, masterSystemTime=1732350468123 2024-11-23T08:27:48,141 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658., pid=241, masterSystemTime=1732350468124 2024-11-23T08:27:48,142 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 2024-11-23T08:27:48,142 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 2024-11-23T08:27:48,142 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=239 updating hbase:meta row=19aecca552920d1bec6fe5848db649c4, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:27:48,142 DEBUG [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 2024-11-23T08:27:48,142 INFO [RS_OPEN_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 
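Both regions report "Region opened successfully" and the open procedures can now complete. To confirm from a client where each region landed, a RegionLocator loop like the following works (again assuming an open Connection named connection):

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionLocator;

    try (RegionLocator locator = connection.getRegionLocator(
             TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Prints <table>,<startKey>,<ts>.<encodedName>. -> host,port,startcode
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }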
2024-11-23T08:27:48,143 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=238 updating hbase:meta row=faeb5f40e361a46141cc129e6a301658, regionState=OPEN, openSeqNum=2, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:27:48,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=240, ppid=239, state=RUNNABLE, hasLock=false; OpenRegionProcedure 19aecca552920d1bec6fe5848db649c4, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:27:48,144 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=241, ppid=238, state=RUNNABLE, hasLock=false; OpenRegionProcedure faeb5f40e361a46141cc129e6a301658, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:27:48,146 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=240, resume processing ppid=239 2024-11-23T08:27:48,146 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; OpenRegionProcedure 19aecca552920d1bec6fe5848db649c4, server=b33d9013c60a,39649,1732350107049 in 173 msec 2024-11-23T08:27:48,146 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=241, resume processing ppid=238 2024-11-23T08:27:48,146 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=238, state=SUCCESS, hasLock=false; OpenRegionProcedure faeb5f40e361a46141cc129e6a301658, server=b33d9013c60a,43417,1732350106962 in 172 msec 2024-11-23T08:27:48,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, ppid=237, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=19aecca552920d1bec6fe5848db649c4, ASSIGN in 330 msec 2024-11-23T08:27:48,147 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=238, resume processing ppid=237 2024-11-23T08:27:48,147 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=237, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=faeb5f40e361a46141cc129e6a301658, ASSIGN in 330 msec 2024-11-23T08:27:48,148 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T08:27:48,148 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350468148"}]},"ts":"1732350468148"} 2024-11-23T08:27:48,150 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-11-23T08:27:48,150 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T08:27:48,150 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-11-23T08:27:48,153 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] 
access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-23T08:27:48,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:27:48,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:27:48,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:27:48,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:27:48,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:27:48,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:27:48,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:27:48,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-23T08:27:48,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-23T08:27:48,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-23T08:27:48,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:27:48,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 
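The hbase:acl write for the new table ("jenkins: RWXCA") is fanned out through ZooKeeper: each watcher receives the node event and refreshes its permission cache, which is what the repeated "Updating permissions cache" records show. Here the owner grant is made automatically by the AccessController's post-create hook; an explicit grant of the same five actions would look roughly like this (assuming an open Connection named connection; note that AccessControlClient.grant declares throws Throwable):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    // null family/qualifier means the grant applies to the whole table.
    AccessControlClient.grant(connection,
        TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
        "jenkins", null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);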
2024-11-23T08:27:48,186 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 397 msec 2024-11-23T08:27:48,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-11-23T08:27:48,417 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T08:27:48,417 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-11-23T08:27:48,418 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:27:48,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-11-23T08:27:48,425 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:27:48,425 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-11-23T08:27:48,425 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T08:27:48,428 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-23T08:27:48,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350468428 (current time:1732350468428). 
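The master has now accepted a FLUSH snapshot request for the still-empty table. A client-side sketch of taking that snapshot and exporting it afterwards with the ExportSnapshot tool; the destination URI and mapper count are placeholders, and the snapshot.export.skip.tmp property name is an assumption for the "skip tmp" behaviour this test variant exercises (it is not shown in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    // 'admin' is an open org.apache.hadoop.hbase.client.Admin.
    admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
        TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));

    Configuration conf = admin.getConfiguration();
    conf.setBoolean("snapshot.export.skip.tmp", true);    // assumed property for the skip-tmp variant
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://dest-cluster:8020/hbase",      // placeholder destination
        "-mappers", "4" });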
2024-11-23T08:27:48,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:27:48,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-23T08:27:48,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:27:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60758d93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:27:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:27:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:27:48,430 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:27:48,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:27:48,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:27:48,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6875a7e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:27:48,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:27:48,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:27:48,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:27:48,432 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45986, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:27:48,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@700e404b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:27:48,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:27:48,433 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:27:48,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:27:48,435 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42134, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:27:48,442 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:27:48,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:27:48,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:27:48,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:27:48,443 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
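The call stack above shows the master closing the short-lived connection it opened in SnapshotDescriptionUtils.isSecurityAvailable to check that the hbase:acl table exists; the records that follow show a second connection reading the table's permissions so they can be written into the snapshot description. Reading the same ACL entries from an ordinary client could be done like this (assuming an open Connection named connection; getUserPermissions declares throws Throwable; illustrative only):

    import java.util.List;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    // The string argument is a table-name regex; here it matches exactly one table.
    List<UserPermission> perms = AccessControlClient.getUserPermissions(
        connection, "testtb-testExportFileSystemStateWithSkipTmp");
    for (UserPermission up : perms) {
      System.out.println(up);
    }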
2024-11-23T08:27:48,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@99232bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:27:48,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:27:48,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:27:48,444 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:27:48,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:27:48,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:27:48,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7eae245, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:27:48,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:27:48,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:27:48,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:27:48,446 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45992, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:27:48,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b906e9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:27:48,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:27:48,447 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:27:48,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:27:48,448 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42136, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
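The ClusterIdFetcher and ConnectionUtils messages above show the short-lived internal connection bootstrapping itself: it asks the connection registry for the cluster id, then fetches the hbase:meta location, and only afterwards locates user regions. The same lookup can be observed from a client through the RegionLocator API; a hedged sketch follows, with the table name taken from the log and the row key and class name purely illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegionExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
      // Resolves which region (and region server) holds the given row by reading hbase:meta,
      // the same lookup the AsyncNonMetaRegionLocator lines in this log perform internally.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("1"));
      System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
    }
  }
}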
2024-11-23T08:27:48,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:27:48,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:27:48,454 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51786, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:27:48,455 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 2024-11-23T08:27:48,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:27:48,455 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:27:48,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:27:48,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-23T08:27:48,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T08:27:48,458 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:27:48,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-23T08:27:48,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-11-23T08:27:48,459 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:27:48,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-23T08:27:48,460 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:27:48,462 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:27:48,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742369_1545 (size=203) 2024-11-23T08:27:48,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742369_1545 (size=203) 2024-11-23T08:27:48,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742369_1545 (size=203) 2024-11-23T08:27:48,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-23T08:27:48,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-23T08:27:48,881 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:27:48,881 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure faeb5f40e361a46141cc129e6a301658}, {pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19aecca552920d1bec6fe5848db649c4}] 2024-11-23T08:27:48,883 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:48,884 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:49,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=243 2024-11-23T08:27:49,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=244 2024-11-23T08:27:49,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 2024-11-23T08:27:49,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 2024-11-23T08:27:49,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2603): Flush status journal for faeb5f40e361a46141cc129e6a301658: 2024-11-23T08:27:49,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2603): Flush status journal for 19aecca552920d1bec6fe5848db649c4: 2024-11-23T08:27:49,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-23T08:27:49,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 
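The PEWorker entries above and below trace the master's SnapshotProcedure state machine for pid=242, including the two per-region SnapshotRegionProcedure children (pid=243 and pid=244) dispatched to the region servers as SnapshotRegionCallable tasks. As a reading aid only, the SNAPSHOT_* execute states that appear in this log are collected in the illustrative enum below; it is not the enum HBase itself generates, and the comments simply paraphrase what the surrounding log lines show.

// Reading aid only: the SNAPSHOT_* execute states logged for pid=242 (and later pid=245),
// in the order they appear; this is not the enum generated inside HBase.
enum SnapshotProcedureStatesSeenInLog {
  SNAPSHOT_PREPARE,                 // validate the request and register the snapshot
  SNAPSHOT_PRE_OPERATION,
  SNAPSHOT_WRITE_SNAPSHOT_INFO,     // persist the snapshot description under .hbase-snapshot/.tmp
  SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, // fan out one SnapshotRegionProcedure per region
  SNAPSHOT_SNAPSHOT_SPLIT_REGIONS,
  SNAPSHOT_SNAPSHOT_MOB_REGION,
  SNAPSHOT_CONSOLIDATE_SNAPSHOT,    // "Convert to Single Snapshot Manifest"
  SNAPSHOT_VERIFIER_SNAPSHOT,
  SNAPSHOT_COMPLETE_SNAPSHOT,       // move the snapshot out of .hbase-snapshot/.tmp
  SNAPSHOT_POST_OPERATION           // unregister the snapshot
}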
2024-11-23T08:27:49,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:49,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:27:49,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:49,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:27:49,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:27:49,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T08:27:49,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742370_1546 (size=82) 2024-11-23T08:27:49,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742370_1546 (size=82) 2024-11-23T08:27:49,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742370_1546 (size=82) 2024-11-23T08:27:49,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 2024-11-23T08:27:49,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=243 2024-11-23T08:27:49,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742371_1547 (size=82) 2024-11-23T08:27:49,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742371_1547 (size=82) 2024-11-23T08:27:49,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742371_1547 (size=82) 2024-11-23T08:27:49,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=243 2024-11-23T08:27:49,043 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 
2024-11-23T08:27:49,043 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=244 2024-11-23T08:27:49,043 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:49,043 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:49,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=244 2024-11-23T08:27:49,043 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:49,044 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:49,045 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure faeb5f40e361a46141cc129e6a301658 in 163 msec 2024-11-23T08:27:49,045 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=244, resume processing ppid=242 2024-11-23T08:27:49,045 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 19aecca552920d1bec6fe5848db649c4 in 163 msec 2024-11-23T08:27:49,045 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:27:49,046 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:27:49,046 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:27:49,047 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:49,047 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:49,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42473 is added to blk_1073742372_1548 (size=585) 2024-11-23T08:27:49,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742372_1548 (size=585) 2024-11-23T08:27:49,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742372_1548 (size=585) 2024-11-23T08:27:49,060 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:27:49,063 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:27:49,064 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:49,065 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:27:49,065 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-11-23T08:27:49,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 608 msec 2024-11-23T08:27:49,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-23T08:27:49,087 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T08:27:49,091 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='07e043c28fe641494c6435ae50ce8f929', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658., hostname=b33d9013c60a,43417,1732350106962, seqNum=2] 2024-11-23T08:27:49,092 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 
'testtb-testExportFileSystemStateWithSkipTmp', row='107c46688c7473e258f52eb11129f7093', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:27:49,092 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='2b8116decf22f792e9bb74481a8a0457d', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:27:49,093 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='30b5482236e7ad998fe23a1900f629a62', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:27:49,095 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='4b6d700ea79d97d42069729745a364765', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:27:49,097 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43417 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:27:49,099 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39649 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T08:27:49,100 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T08:27:49,102 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:49,102 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 
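The two HRegion(8528) warnings above come from the test loading rows with the write-ahead log disabled, which is why the server notes the data may be lost on a crash. On the client side that corresponds to SKIP_WAL durability on each mutation; a minimal sketch follows, with the row key and value as placeholders rather than the test's actual payload, while the cf:q family/qualifier matches the cells later flushed in this log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
      Put put = new Put(Bytes.toBytes("row-0"));              // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),  // family/qualifier as seen in the flushed cells
          Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);                 // triggers the "WAL disabled" warning above
      table.put(put);
    }
  }
}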
2024-11-23T08:27:49,102 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T08:27:49,104 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T08:27:49,109 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T08:27:49,115 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T08:27:49,118 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-23T08:27:49,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732350469118 (current time:1732350469118). 2024-11-23T08:27:49,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T08:27:49,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-23T08:27:49,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T08:27:49,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23f7fcc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:27:49,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:27:49,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:27:49,119 WARN [regionserver/b33d9013c60a:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 4, running: 1 2024-11-23T08:27:49,119 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:27:49,119 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:27:49,119 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:27:49,120 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d8c06ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:27:49,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:27:49,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:27:49,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:27:49,120 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46012, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:27:49,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b34c21e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:27:49,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:27:49,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:27:49,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:27:49,123 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:27:49,125 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 
2024-11-23T08:27:49,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:27:49,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:27:49,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:27:49,125 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:27:49,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bcdc345, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:27:49,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ClusterIdFetcher(90): Going to request b33d9013c60a,38501,-1 for getting cluster id 2024-11-23T08:27:49,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T08:27:49,126 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c4199b0-a333-4032-bd3c-7e578b25122d' 2024-11-23T08:27:49,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T08:27:49,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c4199b0-a333-4032-bd3c-7e578b25122d" 2024-11-23T08:27:49,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48ccc49e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:27:49,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b33d9013c60a,38501,-1] 2024-11-23T08:27:49,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T08:27:49,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:27:49,128 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46018, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T08:27:49,128 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@631825fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T08:27:49,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T08:27:49,130 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b33d9013c60a,43417,1732350106962, seqNum=-1] 2024-11-23T08:27:49,130 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:27:49,131 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42158, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:27:49,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., hostname=b33d9013c60a,39649,1732350107049, seqNum=2] 2024-11-23T08:27:49,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T08:27:49,133 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51788, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T08:27:49,134 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501. 
2024-11-23T08:27:49,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor274.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T08:27:49,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:27:49,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:27:49,135 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T08:27:49,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-23T08:27:49,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
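The PermissionStorage(613) entry above shows the master reading the table's ACL (user jenkins with RWXCA) so that writeAclToSnapshotDescription can embed it in the snapshot description. Permissions like these are typically established through AccessControlClient; a hedged sketch of granting the equivalent rights follows, assuming the AccessController coprocessor is enabled (the class name is illustrative, the user and table names come from the log).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantAclExample {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grants jenkins the rights that show up as "kv [jenkins: RWXCA]" in the hbase:acl read above.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}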
2024-11-23T08:27:49,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=245, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-23T08:27:49,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 245 2024-11-23T08:27:49,137 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T08:27:49,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-23T08:27:49,138 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T08:27:49,139 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T08:27:49,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742373_1549 (size=198) 2024-11-23T08:27:49,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742373_1549 (size=198) 2024-11-23T08:27:49,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742373_1549 (size=198) 2024-11-23T08:27:49,147 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T08:27:49,147 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure faeb5f40e361a46141cc129e6a301658}, {pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19aecca552920d1bec6fe5848db649c4}] 2024-11-23T08:27:49,148 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:49,148 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=246, ppid=245, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:49,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-23T08:27:49,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43417 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=246 2024-11-23T08:27:49,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39649 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=247 2024-11-23T08:27:49,299 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 2024-11-23T08:27:49,299 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 2024-11-23T08:27:49,300 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(2902): Flushing 19aecca552920d1bec6fe5848db649c4 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-11-23T08:27:49,300 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(2902): Flushing faeb5f40e361a46141cc129e6a301658 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-11-23T08:27:49,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/.tmp/cf/b2d78e62a2fb45ebb18b72237c859863 is 71, key is 188b3b48a5bcb939d50713584693cf96/cf:q/1732350469099/Put/seqid=0 2024-11-23T08:27:49,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/.tmp/cf/db40151d2f95430495694480b736c708 is 71, key is 02aa9b318ab4a0da223b88989bf7d57d/cf:q/1732350469097/Put/seqid=0 2024-11-23T08:27:49,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742374_1550 (size=5490) 2024-11-23T08:27:49,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742374_1550 (size=5490) 2024-11-23T08:27:49,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742374_1550 (size=5490) 2024-11-23T08:27:49,330 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/.tmp/cf/db40151d2f95430495694480b736c708 2024-11-23T08:27:49,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/.tmp/cf/db40151d2f95430495694480b736c708 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/cf/db40151d2f95430495694480b736c708 2024-11-23T08:27:49,339 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/cf/db40151d2f95430495694480b736c708, entries=6, sequenceid=6, filesize=5.4 K 2024-11-23T08:27:49,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742375_1551 (size=8122) 2024-11-23T08:27:49,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742375_1551 (size=8122) 2024-11-23T08:27:49,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742375_1551 (size=8122) 2024-11-23T08:27:49,340 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for faeb5f40e361a46141cc129e6a301658 in 41ms, sequenceid=6, compaction requested=false 2024-11-23T08:27:49,340 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/.tmp/cf/b2d78e62a2fb45ebb18b72237c859863 2024-11-23T08:27:49,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-23T08:27:49,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(2603): Flush status journal for faeb5f40e361a46141cc129e6a301658: 2024-11-23T08:27:49,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 
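Because these are FLUSH-type snapshots, each SnapshotRegionCallable first flushes the region's memstore: DefaultStoreFlusher writes the hfile under the region's .tmp/cf directory and HRegionFileSystem commits it into cf/, as traced above, before the manifest references it. The same flush can be requested explicitly through the Admin API; a minimal sketch under the same assumptions as the earlier examples:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks every region of the table to write its memstore out as an hfile,
      // the same per-region flush the snapshot performs before referencing hfiles.
      admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
    }
  }
}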
2024-11-23T08:27:49,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:49,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:27:49,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/cf/db40151d2f95430495694480b736c708] hfiles 2024-11-23T08:27:49,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/cf/db40151d2f95430495694480b736c708 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:49,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/.tmp/cf/b2d78e62a2fb45ebb18b72237c859863 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/cf/b2d78e62a2fb45ebb18b72237c859863 2024-11-23T08:27:49,350 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/cf/b2d78e62a2fb45ebb18b72237c859863, entries=44, sequenceid=6, filesize=7.9 K 2024-11-23T08:27:49,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742376_1552 (size=121) 2024-11-23T08:27:49,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742376_1552 (size=121) 2024-11-23T08:27:49,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742376_1552 (size=121) 2024-11-23T08:27:49,352 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 19aecca552920d1bec6fe5848db649c4 in 53ms, sequenceid=6, compaction requested=false 2024-11-23T08:27:49,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(2603): Flush status journal for 19aecca552920d1bec6fe5848db649c4: 2024-11-23T08:27:49,352 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-23T08:27:49,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:49,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T08:27:49,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/cf/b2d78e62a2fb45ebb18b72237c859863] hfiles 2024-11-23T08:27:49,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/cf/b2d78e62a2fb45ebb18b72237c859863 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:49,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742377_1553 (size=121) 2024-11-23T08:27:49,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742377_1553 (size=121) 2024-11-23T08:27:49,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742377_1553 (size=121) 2024-11-23T08:27:49,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 
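Once the per-region manifests above are written, the remaining states consolidate them into a single manifest, verify the snapshot, and move it out of .hbase-snapshot/.tmp into its final directory. A client can confirm the finished snapshot with Admin.listSnapshots(); a small hedged sketch (class name illustrative):

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // After SNAPSHOT_COMPLETE_SNAPSHOT the snapshot lives under .hbase-snapshot/<name>
      // and should be listed here, e.g. snaptb0-testExportFileSystemStateWithSkipTmp.
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      for (SnapshotDescription snapshot : snapshots) {
        System.out.println(snapshot.getName());
      }
    }
  }
}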
2024-11-23T08:27:49,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=247 2024-11-23T08:27:49,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=247 2024-11-23T08:27:49,365 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:49,365 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:27:49,367 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=245, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 19aecca552920d1bec6fe5848db649c4 in 219 msec 2024-11-23T08:27:49,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-23T08:27:49,729 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 44027 2024-11-23T08:27:49,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 2024-11-23T08:27:49,752 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b33d9013c60a:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=246 2024-11-23T08:27:49,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster(4169): Remote procedure done, pid=246 2024-11-23T08:27:49,752 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:49,752 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=246, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure faeb5f40e361a46141cc129e6a301658 2024-11-23T08:27:49,756 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-11-23T08:27:49,756 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T08:27:49,756 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; SnapshotRegionProcedure faeb5f40e361a46141cc129e6a301658 in 606 msec 2024-11-23T08:27:49,757 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T08:27:49,758 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T08:27:49,758 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:49,758 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:49,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742378_1554 (size=663) 2024-11-23T08:27:49,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742378_1554 (size=663) 2024-11-23T08:27:49,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742378_1554 (size=663) 2024-11-23T08:27:49,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-23T08:27:49,768 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T08:27:49,773 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T08:27:49,773 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:49,774 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T08:27:49,774 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 245 2024-11-23T08:27:49,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 639 msec 2024-11-23T08:27:50,064 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 19aecca552920d1bec6fe5848db649c4 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:27:50,065 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 31a9b15322d53a7b44d2ddb2b2ab3b79 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:27:50,065 DEBUG [master/b33d9013c60a:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region faeb5f40e361a46141cc129e6a301658 changed from -1.0 to 0.0, refreshing cache 2024-11-23T08:27:50,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-23T08:27:50,278 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T08:27:50,278 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350470278 2024-11-23T08:27:50,278 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:34105, tgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350470278, rawTgtDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350470278, srcFsUri=hdfs://localhost:34105, srcDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:27:50,308 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:34105, inputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8 2024-11-23T08:27:50,308 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350470278, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350470278/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:50,309 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
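The tgtDir/srcDir, inputFs/outputRoot and skipTmp=true values above show ExportSnapshot being configured to copy the snapshot to another HDFS location without a .tmp staging directory. A minimal sketch of driving the same tool through ToolRunner; the destination path is a placeholder, and the "snapshot.export.skip.tmp" property is an assumption about what backs skipTmp=true here, so it should be checked against the ExportSnapshot class of the HBase build in use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSkipTmp {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property behind skipTmp=true: write straight to
    // <copy-to>/.hbase-snapshot/<name> instead of a .tmp staging dir.
    conf.setBoolean("snapshot.export.skip.tmp", true);

    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snap-example",                     // placeholder snapshot name
        "-copy-to", "hdfs://namenode:8020/export-dest"   // placeholder destination
    });
    System.exit(rc);
  }
}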
2024-11-23T08:27:50,312 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350470278/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:50,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742379_1555 (size=663) 2024-11-23T08:27:50,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742379_1555 (size=663) 2024-11-23T08:27:50,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742379_1555 (size=663) 2024-11-23T08:27:50,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742380_1556 (size=198) 2024-11-23T08:27:50,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742380_1556 (size=198) 2024-11-23T08:27:50,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742380_1556 (size=198) 2024-11-23T08:27:50,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:50,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:50,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:51,420 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0010_000001 (auth:SIMPLE) from 127.0.0.1:42186 2024-11-23T08:27:51,434 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_3/usercache/jenkins/appcache/application_1732350113698_0010/container_1732350113698_0010_01_000001/launch_container.sh] 2024-11-23T08:27:51,434 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_3/usercache/jenkins/appcache/application_1732350113698_0010/container_1732350113698_0010_01_000001/container_tokens] 2024-11-23T08:27:51,434 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_3/usercache/jenkins/appcache/application_1732350113698_0010/container_1732350113698_0010_01_000001/sysfs] 2024-11-23T08:27:51,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-9566009815555964104.jar 2024-11-23T08:27:51,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:51,452 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:51,520 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop-16537769682909837827.jar 2024-11-23T08:27:51,520 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:51,521 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:51,521 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:51,521 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:51,521 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:51,522 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T08:27:51,522 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T08:27:51,522 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T08:27:51,522 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T08:27:51,523 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T08:27:51,523 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T08:27:51,523 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T08:27:51,523 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T08:27:51,524 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T08:27:51,524 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T08:27:51,524 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T08:27:51,524 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T08:27:51,524 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:27:51,525 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:27:51,525 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:27:51,525 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:27:51,525 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T08:27:51,525 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:27:51,525 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T08:27:51,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742381_1557 (size=131440) 2024-11-23T08:27:51,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742381_1557 (size=131440) 2024-11-23T08:27:51,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742381_1557 (size=131440) 2024-11-23T08:27:51,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742382_1558 (size=4188619) 2024-11-23T08:27:51,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742382_1558 (size=4188619) 2024-11-23T08:27:51,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742382_1558 (size=4188619) 2024-11-23T08:27:51,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742383_1559 (size=1323991) 2024-11-23T08:27:51,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742383_1559 (size=1323991) 2024-11-23T08:27:51,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742383_1559 (size=1323991) 2024-11-23T08:27:51,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742384_1560 (size=440957) 2024-11-23T08:27:51,606 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742384_1560 (size=440957) 2024-11-23T08:27:51,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742384_1560 (size=440957) 2024-11-23T08:27:51,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742385_1561 (size=903733) 2024-11-23T08:27:51,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742385_1561 (size=903733) 2024-11-23T08:27:51,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742385_1561 (size=903733) 2024-11-23T08:27:51,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742386_1562 (size=8360083) 2024-11-23T08:27:51,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742386_1562 (size=8360083) 2024-11-23T08:27:51,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742386_1562 (size=8360083) 2024-11-23T08:27:51,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742387_1563 (size=1877034) 2024-11-23T08:27:51,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742387_1563 (size=1877034) 2024-11-23T08:27:51,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742387_1563 (size=1877034) 2024-11-23T08:27:51,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742388_1564 (size=77835) 2024-11-23T08:27:51,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742388_1564 (size=77835) 2024-11-23T08:27:51,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742388_1564 (size=77835) 2024-11-23T08:27:52,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742389_1565 (size=30949) 2024-11-23T08:27:52,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742389_1565 (size=30949) 2024-11-23T08:27:52,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742389_1565 (size=30949) 2024-11-23T08:27:52,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742390_1566 (size=1597347) 2024-11-23T08:27:52,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742390_1566 (size=1597347) 2024-11-23T08:27:52,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742390_1566 (size=1597347) 2024-11-23T08:27:52,103 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742391_1567 (size=4695811) 2024-11-23T08:27:52,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742391_1567 (size=4695811) 2024-11-23T08:27:52,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742391_1567 (size=4695811) 2024-11-23T08:27:52,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742392_1568 (size=232957) 2024-11-23T08:27:52,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742392_1568 (size=232957) 2024-11-23T08:27:52,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742392_1568 (size=232957) 2024-11-23T08:27:52,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742393_1569 (size=127628) 2024-11-23T08:27:52,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742393_1569 (size=127628) 2024-11-23T08:27:52,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742393_1569 (size=127628) 2024-11-23T08:27:52,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742394_1570 (size=20406) 2024-11-23T08:27:52,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742394_1570 (size=20406) 2024-11-23T08:27:52,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742394_1570 (size=20406) 2024-11-23T08:27:52,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742395_1571 (size=5175431) 2024-11-23T08:27:52,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742395_1571 (size=5175431) 2024-11-23T08:27:52,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742395_1571 (size=5175431) 2024-11-23T08:27:52,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742396_1572 (size=217634) 2024-11-23T08:27:52,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742396_1572 (size=217634) 2024-11-23T08:27:52,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742396_1572 (size=217634) 2024-11-23T08:27:52,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742397_1573 (size=1832290) 2024-11-23T08:27:52,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742397_1573 (size=1832290) 2024-11-23T08:27:52,185 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742397_1573 (size=1832290) 2024-11-23T08:27:52,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742398_1574 (size=322274) 2024-11-23T08:27:52,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742398_1574 (size=322274) 2024-11-23T08:27:52,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742398_1574 (size=322274) 2024-11-23T08:27:52,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742399_1575 (size=503880) 2024-11-23T08:27:52,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742399_1575 (size=503880) 2024-11-23T08:27:52,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742399_1575 (size=503880) 2024-11-23T08:27:52,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742400_1576 (size=6424742) 2024-11-23T08:27:52,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742400_1576 (size=6424742) 2024-11-23T08:27:52,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742400_1576 (size=6424742) 2024-11-23T08:27:52,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742401_1577 (size=29229) 2024-11-23T08:27:52,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742401_1577 (size=29229) 2024-11-23T08:27:52,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742401_1577 (size=29229) 2024-11-23T08:27:52,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742402_1578 (size=24096) 2024-11-23T08:27:52,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742402_1578 (size=24096) 2024-11-23T08:27:52,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742402_1578 (size=24096) 2024-11-23T08:27:52,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742403_1579 (size=111872) 2024-11-23T08:27:52,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742403_1579 (size=111872) 2024-11-23T08:27:52,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742403_1579 (size=111872) 2024-11-23T08:27:52,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742404_1580 (size=45609) 2024-11-23T08:27:52,288 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742404_1580 (size=45609) 2024-11-23T08:27:52,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742404_1580 (size=45609) 2024-11-23T08:27:52,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742405_1581 (size=136454) 2024-11-23T08:27:52,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742405_1581 (size=136454) 2024-11-23T08:27:52,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742405_1581 (size=136454) 2024-11-23T08:27:52,709 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T08:27:52,711 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-11-23T08:27:52,713 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=7.9 K 2024-11-23T08:27:52,713 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.4 K 2024-11-23T08:27:52,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742406_1582 (size=469) 2024-11-23T08:27:52,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742406_1582 (size=469) 2024-11-23T08:27:52,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742406_1582 (size=469) 2024-11-23T08:27:52,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742407_1583 (size=21) 2024-11-23T08:27:52,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742407_1583 (size=21) 2024-11-23T08:27:52,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742407_1583 (size=21) 2024-11-23T08:27:52,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742408_1584 (size=304172) 2024-11-23T08:27:52,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742408_1584 (size=304172) 2024-11-23T08:27:52,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742408_1584 (size=304172) 2024-11-23T08:27:52,777 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T08:27:52,777 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T08:27:53,039 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:27:53,352 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0011_000001 (auth:SIMPLE) from 127.0.0.1:38896 2024-11-23T08:27:56,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:27:56,247 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-11-23T08:27:56,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-23T08:27:59,562 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0011_000001 (auth:SIMPLE) from 127.0.0.1:38992 2024-11-23T08:28:00,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742409_1585 (size=349894) 2024-11-23T08:28:00,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742409_1585 (size=349894) 2024-11-23T08:28:00,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742409_1585 (size=349894) 2024-11-23T08:28:01,750 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:28:02,170 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0011_000001 (auth:SIMPLE) from 127.0.0.1:52280 2024-11-23T08:28:02,176 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0011_000001 (auth:SIMPLE) from 127.0.0.1:55142 2024-11-23T08:28:06,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742410_1586 (size=8122) 2024-11-23T08:28:06,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742410_1586 (size=8122) 2024-11-23T08:28:06,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742410_1586 (size=8122) 2024-11-23T08:28:06,950 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0011/container_1732350113698_0011_01_000002/launch_container.sh] 2024-11-23T08:28:06,950 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0011/container_1732350113698_0011_01_000002/container_tokens] 2024-11-23T08:28:06,950 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_3/usercache/jenkins/appcache/application_1732350113698_0011/container_1732350113698_0011_01_000002/sysfs] 2024-11-23T08:28:07,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742412_1588 (size=5490) 2024-11-23T08:28:07,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742412_1588 (size=5490) 2024-11-23T08:28:07,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742412_1588 (size=5490) 2024-11-23T08:28:07,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742411_1587 (size=22217) 2024-11-23T08:28:07,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742411_1587 (size=22217) 2024-11-23T08:28:07,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742411_1587 (size=22217) 2024-11-23T08:28:07,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742413_1589 (size=476) 2024-11-23T08:28:07,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742413_1589 (size=476) 2024-11-23T08:28:07,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742413_1589 (size=476) 2024-11-23T08:28:07,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742414_1590 (size=22217) 2024-11-23T08:28:07,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742414_1590 (size=22217) 2024-11-23T08:28:07,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742414_1590 (size=22217) 2024-11-23T08:28:07,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742415_1591 (size=349894) 2024-11-23T08:28:07,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742415_1591 (size=349894) 2024-11-23T08:28:07,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742415_1591 (size=349894) 2024-11-23T08:28:07,401 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0011_000001 (auth:SIMPLE) from 127.0.0.1:52294 2024-11-23T08:28:07,408 
INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0011_000001 (auth:SIMPLE) from 127.0.0.1:55144 2024-11-23T08:28:07,418 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732350113698_0011_01_000003 is : 143 2024-11-23T08:28:07,427 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_0/usercache/jenkins/appcache/application_1732350113698_0011/container_1732350113698_0011_01_000003/launch_container.sh] 2024-11-23T08:28:07,427 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_0/usercache/jenkins/appcache/application_1732350113698_0011/container_1732350113698_0011_01_000003/container_tokens] 2024-11-23T08:28:07,427 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-1_0/usercache/jenkins/appcache/application_1732350113698_0011/container_1732350113698_0011_01_000003/sysfs] 2024-11-23T08:28:09,095 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T08:28:09,095 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
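The run of "For class X, using jar Y" DEBUG lines above comes from TableMapReduceUtil resolving, for every class the export job needs, the jar that provides it, so those jars can be shipped with the MapReduce job that performs the copy. A minimal sketch of the call that produces that output, assuming a job already set up for the copy (the job name is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class AddDependencyJarsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-example"); // placeholder job name
    // Finds the jar backing each required HBase/Hadoop class and adds it to the
    // job's tmpjars, which is what emits the "For class ..., using jar ..." lines.
    TableMapReduceUtil.addDependencyJars(job);
  }
}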
2024-11-23T08:28:09,101 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,101 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T08:28:09,101 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T08:28:09,101 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,102 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-23T08:28:09,102 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-23T08:28:09,102 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-63341018_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350470278/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350470278/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,102 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350470278/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-23T08:28:09,102 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/export-test/export-1732350470278/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-23T08:28:09,108 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=248, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-11-23T08:28:09,111 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350489111"}]},"ts":"1732350489111"} 2024-11-23T08:28:09,113 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-11-23T08:28:09,113 INFO 
[PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-11-23T08:28:09,114 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=249, ppid=248, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-11-23T08:28:09,115 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=faeb5f40e361a46141cc129e6a301658, UNASSIGN}, {pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=19aecca552920d1bec6fe5848db649c4, UNASSIGN}] 2024-11-23T08:28:09,116 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=19aecca552920d1bec6fe5848db649c4, UNASSIGN 2024-11-23T08:28:09,116 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=faeb5f40e361a46141cc129e6a301658, UNASSIGN 2024-11-23T08:28:09,116 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=250 updating hbase:meta row=faeb5f40e361a46141cc129e6a301658, regionState=CLOSING, regionLocation=b33d9013c60a,43417,1732350106962 2024-11-23T08:28:09,116 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=251 updating hbase:meta row=19aecca552920d1bec6fe5848db649c4, regionState=CLOSING, regionLocation=b33d9013c60a,39649,1732350107049 2024-11-23T08:28:09,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=19aecca552920d1bec6fe5848db649c4, UNASSIGN because future has completed 2024-11-23T08:28:09,118 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:28:09,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=252, ppid=251, state=RUNNABLE, hasLock=false; CloseRegionProcedure 19aecca552920d1bec6fe5848db649c4, server=b33d9013c60a,39649,1732350107049}] 2024-11-23T08:28:09,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=faeb5f40e361a46141cc129e6a301658, UNASSIGN because future has completed 2024-11-23T08:28:09,118 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T08:28:09,118 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=253, ppid=250, state=RUNNABLE, hasLock=false; CloseRegionProcedure faeb5f40e361a46141cc129e6a301658, 
server=b33d9013c60a,43417,1732350106962}] 2024-11-23T08:28:09,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-11-23T08:28:09,271 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(122): Close 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:28:09,271 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:28:09,271 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1722): Closing 19aecca552920d1bec6fe5848db649c4, disabling compactions & flushes 2024-11-23T08:28:09,272 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 2024-11-23T08:28:09,272 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 2024-11-23T08:28:09,272 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. after waiting 0 ms 2024-11-23T08:28:09,272 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(122): Close faeb5f40e361a46141cc129e6a301658 2024-11-23T08:28:09,272 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 2024-11-23T08:28:09,272 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T08:28:09,272 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1722): Closing faeb5f40e361a46141cc129e6a301658, disabling compactions & flushes 2024-11-23T08:28:09,272 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 2024-11-23T08:28:09,272 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 2024-11-23T08:28:09,272 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 
after waiting 0 ms 2024-11-23T08:28:09,272 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 2024-11-23T08:28:09,280 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:28:09,280 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T08:28:09,280 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:28:09,280 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:28:09,280 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658. 2024-11-23T08:28:09,280 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4. 
2024-11-23T08:28:09,280 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1676): Region close journal for faeb5f40e361a46141cc129e6a301658: Waiting for close lock at 1732350489272Running coprocessor pre-close hooks at 1732350489272Disabling compacts and flushes for region at 1732350489272Disabling writes for close at 1732350489272Writing region close event to WAL at 1732350489274 (+2 ms)Running coprocessor post-close hooks at 1732350489280 (+6 ms)Closed at 1732350489280 2024-11-23T08:28:09,280 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1676): Region close journal for 19aecca552920d1bec6fe5848db649c4: Waiting for close lock at 1732350489271Running coprocessor pre-close hooks at 1732350489271Disabling compacts and flushes for region at 1732350489271Disabling writes for close at 1732350489272 (+1 ms)Writing region close event to WAL at 1732350489274 (+2 ms)Running coprocessor post-close hooks at 1732350489280 (+6 ms)Closed at 1732350489280 2024-11-23T08:28:09,282 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(157): Closed faeb5f40e361a46141cc129e6a301658 2024-11-23T08:28:09,283 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=250 updating hbase:meta row=faeb5f40e361a46141cc129e6a301658, regionState=CLOSED 2024-11-23T08:28:09,283 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(157): Closed 19aecca552920d1bec6fe5848db649c4 2024-11-23T08:28:09,283 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=251 updating hbase:meta row=19aecca552920d1bec6fe5848db649c4, regionState=CLOSED 2024-11-23T08:28:09,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=253, ppid=250, state=RUNNABLE, hasLock=false; CloseRegionProcedure faeb5f40e361a46141cc129e6a301658, server=b33d9013c60a,43417,1732350106962 because future has completed 2024-11-23T08:28:09,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=252, ppid=251, state=RUNNABLE, hasLock=false; CloseRegionProcedure 19aecca552920d1bec6fe5848db649c4, server=b33d9013c60a,39649,1732350107049 because future has completed 2024-11-23T08:28:09,287 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=253, resume processing ppid=250 2024-11-23T08:28:09,288 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=252, resume processing ppid=251 2024-11-23T08:28:09,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=253, ppid=250, state=SUCCESS, hasLock=false; CloseRegionProcedure faeb5f40e361a46141cc129e6a301658, server=b33d9013c60a,43417,1732350106962 in 167 msec 2024-11-23T08:28:09,288 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=252, ppid=251, state=SUCCESS, hasLock=false; CloseRegionProcedure 19aecca552920d1bec6fe5848db649c4, server=b33d9013c60a,39649,1732350107049 in 168 msec 2024-11-23T08:28:09,288 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=250, ppid=249, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=faeb5f40e361a46141cc129e6a301658, UNASSIGN in 172 msec 2024-11-23T08:28:09,289 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=251, resume processing ppid=249 2024-11-23T08:28:09,289 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=251, ppid=249, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=19aecca552920d1bec6fe5848db649c4, UNASSIGN in 173 msec 2024-11-23T08:28:09,292 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=249, resume processing ppid=248 2024-11-23T08:28:09,292 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=249, ppid=248, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 176 msec 2024-11-23T08:28:09,293 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732350489293"}]},"ts":"1732350489293"} 2024-11-23T08:28:09,294 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-11-23T08:28:09,295 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-11-23T08:28:09,297 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 187 msec 2024-11-23T08:28:09,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-11-23T08:28:09,430 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T08:28:09,431 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] procedure2.ProcedureExecutor(1139): Stored pid=254, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,435 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=254, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,436 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=254, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,439 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39649 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,441 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658 2024-11-23T08:28:09,441 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4 2024-11-23T08:28:09,443 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/recovered.edits] 2024-11-23T08:28:09,443 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/cf, FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/recovered.edits] 2024-11-23T08:28:09,446 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/cf/b2d78e62a2fb45ebb18b72237c859863 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/cf/b2d78e62a2fb45ebb18b72237c859863 2024-11-23T08:28:09,446 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/cf/db40151d2f95430495694480b736c708 to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/cf/db40151d2f95430495694480b736c708 2024-11-23T08:28:09,448 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658/recovered.edits/9.seqid 2024-11-23T08:28:09,448 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/recovered.edits/9.seqid to hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4/recovered.edits/9.seqid 2024-11-23T08:28:09,449 DEBUG [HFileArchiver-26 {}] 
backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/faeb5f40e361a46141cc129e6a301658 2024-11-23T08:28:09,449 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testtb-testExportFileSystemStateWithSkipTmp/19aecca552920d1bec6fe5848db649c4 2024-11-23T08:28:09,449 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-11-23T08:28:09,451 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=254, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,454 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-11-23T08:28:09,456 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-11-23T08:28:09,457 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=254, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,457 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-11-23T08:28:09,458 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350489457"}]},"ts":"9223372036854775807"} 2024-11-23T08:28:09,458 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732350489457"}]},"ts":"9223372036854775807"} 2024-11-23T08:28:09,460 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T08:28:09,460 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => faeb5f40e361a46141cc129e6a301658, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732350467787.faeb5f40e361a46141cc129e6a301658.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 19aecca552920d1bec6fe5848db649c4, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T08:28:09,460 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
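The disable/delete activity traced above (DisableTableProcedure pid=248 through DeleteTableProcedure pid=254, with the region directories archived by HFileArchiver) is driven by two client calls. A minimal, hypothetical sketch of that client side, assuming a standard synchronous HBase connection rather than the test's own async admin:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Master side: DisableTableProcedure unassigns the regions and moves the
      // table state in hbase:meta from DISABLING to DISABLED (pid=248 above).
      admin.disableTable(table);
      // Master side: DeleteTableProcedure archives the region directories,
      // removes the meta rows and drops the table descriptor (pid=254 above).
      admin.deleteTable(table);
    }
  }
}
```

Both calls block until the corresponding master procedure finishes, which is what the repeated "Checking to see if procedure is done pid=..." lines reflect.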
2024-11-23T08:28:09,461 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732350489460"}]},"ts":"9223372036854775807"} 2024-11-23T08:28:09,462 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-11-23T08:28:09,463 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=254, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,464 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=254, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 32 msec 2024-11-23T08:28:09,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,494 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-23T08:28:09,494 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-23T08:28:09,494 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-23T08:28:09,494 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-23T08:28:09,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:28:09,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:28:09,501 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:28:09,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T08:28:09,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=254 2024-11-23T08:28:09,503 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:28:09,503 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:28:09,503 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:28:09,504 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,504 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T08:28:09,504 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T08:28:09,511 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-23T08:28:09,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: 
emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,514 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-23T08:28:09,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:09,537 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=825 (was 818) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:38484 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:58024 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 148187) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2022590357_1 at /127.0.0.1:52398 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36691 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-63341018_22 at /127.0.0.1:52410 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-8939 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) 
java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (294847031) connection to localhost/127.0.0.1:36691 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=816 (was 819), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=877 (was 849) - SystemLoadAverage LEAK? -, ProcessCount=22 (was 23), AvailableMemoryMB=1930 (was 1793) - AvailableMemoryMB LEAK? - 2024-11-23T08:28:09,537 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=825 is superior to 500 2024-11-23T08:28:09,537 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 
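The two master RPCs logged just before the ResourceChecker thread dump (delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" and the matching "snaptb0-..." request, each followed by SnapshotManager "Deleting snapshot:") map to Admin.deleteSnapshot calls on the client side. A minimal, hypothetical sketch, assuming the same snapshot names and connection setup:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropSnapshots {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Each call removes the named snapshot's manifest from the cluster's
      // snapshot directory; the table itself is unaffected (and here was
      // already deleted in the preceding step).
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
    }
  }
}
```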
2024-11-23T08:28:09,544 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43c039d9{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-23T08:28:09,547 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d38c72c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T08:28:09,547 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T08:28:09,547 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@dbc49a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-23T08:28:09,547 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6632de68{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,STOPPED} 2024-11-23T08:28:13,496 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732350113698_0011_000001 (auth:SIMPLE) from 127.0.0.1:39868 2024-11-23T08:28:13,508 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_2/usercache/jenkins/appcache/application_1732350113698_0011/container_1732350113698_0011_01_000001/launch_container.sh] 2024-11-23T08:28:13,508 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_2/usercache/jenkins/appcache/application_1732350113698_0011/container_1732350113698_0011_01_000001/container_tokens] 2024-11-23T08:28:13,508 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_84671797/yarn-6041009597/MiniMRCluster_84671797-localDir-nm-0_2/usercache/jenkins/appcache/application_1732350113698_0011/container_1732350113698_0011_01_000001/sysfs] 2024-11-23T08:28:14,709 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T08:28:14,812 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:28:16,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T08:28:21,752 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:28:26,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@261d7d33{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-23T08:28:26,566 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58ab1eff{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T08:28:26,567 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T08:28:26,567 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@573f63f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-23T08:28:26,567 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@543a633d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,STOPPED} 2024-11-23T08:28:27,320 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 31a9b15322d53a7b44d2ddb2b2ab3b79, had cached 0 bytes from a total of 5216 2024-11-23T08:28:30,548 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d68b63a66937da30826ba80d223788e3, had cached 0 bytes from a total of 8392 2024-11-23T08:28:34,730 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 44027 2024-11-23T08:28:43,583 ERROR [Thread[Thread-403,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-23T08:28:43,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39ead806{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-23T08:28:43,585 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e976c57{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T08:28:43,585 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T08:28:43,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32fc7a65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 
2024-11-23T08:28:43,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddf8f6d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,STOPPED} 2024-11-23T08:28:43,589 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-11-23T08:28:43,593 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-11-23T08:28:43,594 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-11-23T08:28:43,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741830_1006 (size=1179446) 2024-11-23T08:28:43,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741830_1006 (size=1179446) 2024-11-23T08:28:43,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741830_1006 (size=1179446) 2024-11-23T08:28:43,597 ERROR [Thread[Thread-426,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-23T08:28:43,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d640796{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-23T08:28:43,602 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@734bdfe6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T08:28:43,602 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T08:28:43,602 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@339f0d8f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-23T08:28:43,602 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17a93e7b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,STOPPED} 2024-11-23T08:28:43,603 ERROR [Thread[Thread-385,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-23T08:28:43,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-11-23T08:28:43,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T08:28:43,603 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T08:28:43,603 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T08:28:43,603 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:28:43,603 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:28:43,603 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
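The call stack above shows the whole shutdown being driven from the suite's tearDownAfterClass via HBaseTestingUtil.shutdownMiniCluster. A hypothetical sketch of that teardown hook, assuming TEST_UTIL is the suite's shared HBaseTestingUtil instance (the field name and construction are assumptions, not taken from the test source):

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

public class TearDownSketch {
  // Shared test cluster helper; the real suite creates and starts it during setup.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Closes the shared async connection, stops the master and region servers,
    // then tears down DFS and ZooKeeper, producing the "Shutting down
    // minicluster" / "STOPPING region server" lines that follow.
    TEST_UTIL.shutdownMiniCluster();
  }
}
```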
2024-11-23T08:28:43,604 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T08:28:43,604 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1589288820, stopped=false 2024-11-23T08:28:43,604 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:28:43,604 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-23T08:28:43,604 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b33d9013c60a,38501,1732350105792 2024-11-23T08:28:43,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T08:28:43,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T08:28:43,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T08:28:43,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T08:28:43,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:28:43,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:28:43,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:28:43,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:28:43,653 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T08:28:43,654 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
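The shutdown signal itself is the deletion of the /hbase/running znode, which every server observes through its ZKWatcher (the NodeDeleted events logged above for master:38501 and the three region servers). A hypothetical sketch of watching that znode with the plain ZooKeeper client API rather than HBase's internal watcher; the quorum address is taken from the log, the session timeout is an assumption:

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningNodeWatch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) -> {
      // HBase servers treat deletion of this znode as the cluster shutdown signal.
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        System.out.println("Cluster shutdown requested: " + event);
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60357", 30000, watcher);
    // exists() registers the watch whether or not the znode currently exists.
    zk.exists("/hbase/running", watcher);
    Thread.sleep(60_000); // keep the session open long enough to see the event
    zk.close();
  }
}
```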
2024-11-23T08:28:43,655 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T08:28:43,655 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T08:28:43,655 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:28:43,655 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T08:28:43,655 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T08:28:43,656 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b33d9013c60a,45199,1732350106787' ***** 2024-11-23T08:28:43,657 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:28:43,657 INFO 
[Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T08:28:43,657 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b33d9013c60a,43417,1732350106962' ***** 2024-11-23T08:28:43,657 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:28:43,657 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T08:28:43,657 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T08:28:43,657 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T08:28:43,657 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b33d9013c60a,39649,1732350107049' ***** 2024-11-23T08:28:43,657 INFO [RS:0;b33d9013c60a:45199 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T08:28:43,657 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:28:43,657 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T08:28:43,658 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T08:28:43,658 INFO [RS:1;b33d9013c60a:43417 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T08:28:43,658 INFO [RS:1;b33d9013c60a:43417 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T08:28:43,658 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T08:28:43,658 INFO [RS:2;b33d9013c60a:39649 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T08:28:43,658 INFO [RS:2;b33d9013c60a:39649 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T08:28:43,658 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T08:28:43,658 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(3091): Received CLOSE for 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:28:43,658 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(3091): Received CLOSE for 5c2409a8a8e1fdedb01950f8285b9431 2024-11-23T08:28:43,658 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T08:28:43,658 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T08:28:43,659 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(959): stopping server b33d9013c60a,43417,1732350106962 2024-11-23T08:28:43,659 INFO [RS:1;b33d9013c60a:43417 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T08:28:43,659 INFO [RS:1;b33d9013c60a:43417 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;b33d9013c60a:43417. 
2024-11-23T08:28:43,659 DEBUG [RS:1;b33d9013c60a:43417 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T08:28:43,659 DEBUG [RS:1;b33d9013c60a:43417 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:28:43,659 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 31a9b15322d53a7b44d2ddb2b2ab3b79, disabling compactions & flushes 2024-11-23T08:28:43,659 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T08:28:43,659 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:28:43,659 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T08:28:43,659 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T08:28:43,659 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:28:43,659 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. after waiting 0 ms 2024-11-23T08:28:43,659 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T08:28:43,659 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 
2024-11-23T08:28:43,660 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(959): stopping server b33d9013c60a,39649,1732350107049 2024-11-23T08:28:43,660 INFO [RS:2;b33d9013c60a:39649 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T08:28:43,660 INFO [RS:2;b33d9013c60a:39649 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;b33d9013c60a:39649. 2024-11-23T08:28:43,660 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5c2409a8a8e1fdedb01950f8285b9431, disabling compactions & flushes 2024-11-23T08:28:43,660 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. 2024-11-23T08:28:43,660 DEBUG [RS:2;b33d9013c60a:39649 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T08:28:43,660 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. 2024-11-23T08:28:43,660 DEBUG [RS:2;b33d9013c60a:39649 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:28:43,660 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. after waiting 0 ms 2024-11-23T08:28:43,660 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-23T08:28:43,660 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. 
2024-11-23T08:28:43,660 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-23T08:28:43,660 DEBUG [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(1325): Online Regions={31a9b15322d53a7b44d2ddb2b2ab3b79=testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79., 1588230740=hbase:meta,,1.1588230740} 2024-11-23T08:28:43,660 DEBUG [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(1325): Online Regions={5c2409a8a8e1fdedb01950f8285b9431=hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431.} 2024-11-23T08:28:43,660 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T08:28:43,660 INFO [RS:0;b33d9013c60a:45199 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T08:28:43,660 INFO [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T08:28:43,660 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(3091): Received CLOSE for d68b63a66937da30826ba80d223788e3 2024-11-23T08:28:43,660 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T08:28:43,660 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T08:28:43,660 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(959): stopping server b33d9013c60a,45199,1732350106787 2024-11-23T08:28:43,660 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T08:28:43,660 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 5c2409a8a8e1fdedb01950f8285b9431 1/1 column families, dataSize=1.65 KB heapSize=3.90 KB 2024-11-23T08:28:43,660 INFO [RS:0;b33d9013c60a:45199 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T08:28:43,660 INFO [RS:0;b33d9013c60a:45199 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b33d9013c60a:45199. 
2024-11-23T08:28:43,660 INFO [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=8.76 KB heapSize=14.76 KB 2024-11-23T08:28:43,660 DEBUG [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 31a9b15322d53a7b44d2ddb2b2ab3b79 2024-11-23T08:28:43,660 DEBUG [RS:0;b33d9013c60a:45199 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T08:28:43,661 DEBUG [RS:0;b33d9013c60a:45199 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:28:43,661 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-23T08:28:43,661 DEBUG [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(1325): Online Regions={d68b63a66937da30826ba80d223788e3=testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3.} 2024-11-23T08:28:43,661 DEBUG [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(1351): Waiting on d68b63a66937da30826ba80d223788e3 2024-11-23T08:28:43,661 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d68b63a66937da30826ba80d223788e3, disabling compactions & flushes 2024-11-23T08:28:43,661 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 2024-11-23T08:28:43,661 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 2024-11-23T08:28:43,661 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. after waiting 0 ms 2024-11-23T08:28:43,661 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 
2024-11-23T08:28:43,661 DEBUG [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(1351): Waiting on 5c2409a8a8e1fdedb01950f8285b9431 2024-11-23T08:28:43,669 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/info/485df1a3b2e84f3cbb453a390922a8c9 is 121, key is testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4./info:/1732350489457/DeleteFamily/seqid=0 2024-11-23T08:28:43,669 INFO [regionserver/b33d9013c60a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T08:28:43,670 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/31a9b15322d53a7b44d2ddb2b2ab3b79/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=8 2024-11-23T08:28:43,670 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:28:43,670 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:28:43,670 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 31a9b15322d53a7b44d2ddb2b2ab3b79: Waiting for close lock at 1732350523659Running coprocessor pre-close hooks at 1732350523659Disabling compacts and flushes for region at 1732350523659Disabling writes for close at 1732350523659Writing region close event to WAL at 1732350523661 (+2 ms)Running coprocessor post-close hooks at 1732350523670 (+9 ms)Closed at 1732350523670 2024-11-23T08:28:43,670 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1732350375208.31a9b15322d53a7b44d2ddb2b2ab3b79. 2024-11-23T08:28:43,671 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/default/testExportExpiredSnapshot/d68b63a66937da30826ba80d223788e3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T08:28:43,671 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:28:43,671 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 
2024-11-23T08:28:43,672 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d68b63a66937da30826ba80d223788e3: Waiting for close lock at 1732350523661Running coprocessor pre-close hooks at 1732350523661Disabling compacts and flushes for region at 1732350523661Disabling writes for close at 1732350523661Writing region close event to WAL at 1732350523666 (+5 ms)Running coprocessor post-close hooks at 1732350523671 (+5 ms)Closed at 1732350523671 2024-11-23T08:28:43,672 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1732350375208.d68b63a66937da30826ba80d223788e3. 2024-11-23T08:28:43,676 INFO [regionserver/b33d9013c60a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T08:28:43,676 INFO [regionserver/b33d9013c60a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T08:28:43,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742416_1592 (size=6387) 2024-11-23T08:28:43,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742416_1592 (size=6387) 2024-11-23T08:28:43,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742416_1592 (size=6387) 2024-11-23T08:28:43,680 INFO [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.91 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/info/485df1a3b2e84f3cbb453a390922a8c9 2024-11-23T08:28:43,683 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/acl/5c2409a8a8e1fdedb01950f8285b9431/.tmp/l/67fc62f6c19a4fe2abaa983e3f090360 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1732350373111/DeleteFamily/seqid=0 2024-11-23T08:28:43,686 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/ns/9cca5b92ce494e1ba7172ec94caaec7d is 119, key is testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4./ns:/1732350489451/DeleteFamily/seqid=0 2024-11-23T08:28:43,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742417_1593 (size=5860) 2024-11-23T08:28:43,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742417_1593 (size=5860) 2024-11-23T08:28:43,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742417_1593 (size=5860) 2024-11-23T08:28:43,689 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=31 (bloomFilter=false), 
to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/acl/5c2409a8a8e1fdedb01950f8285b9431/.tmp/l/67fc62f6c19a4fe2abaa983e3f090360 2024-11-23T08:28:43,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742418_1594 (size=5927) 2024-11-23T08:28:43,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742418_1594 (size=5927) 2024-11-23T08:28:43,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742418_1594 (size=5927) 2024-11-23T08:28:43,692 INFO [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=430 B at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/ns/9cca5b92ce494e1ba7172ec94caaec7d 2024-11-23T08:28:43,694 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 67fc62f6c19a4fe2abaa983e3f090360 2024-11-23T08:28:43,694 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/acl/5c2409a8a8e1fdedb01950f8285b9431/.tmp/l/67fc62f6c19a4fe2abaa983e3f090360 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/acl/5c2409a8a8e1fdedb01950f8285b9431/l/67fc62f6c19a4fe2abaa983e3f090360 2024-11-23T08:28:43,698 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/rep_barrier/db9dac5869754b17955925cf28effb84 is 128, key is testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4./rep_barrier:/1732350489451/DeleteFamily/seqid=0 2024-11-23T08:28:43,699 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 67fc62f6c19a4fe2abaa983e3f090360 2024-11-23T08:28:43,699 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/acl/5c2409a8a8e1fdedb01950f8285b9431/l/67fc62f6c19a4fe2abaa983e3f090360, entries=14, sequenceid=31, filesize=5.7 K 2024-11-23T08:28:43,700 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 5c2409a8a8e1fdedb01950f8285b9431 in 40ms, sequenceid=31, compaction requested=false 2024-11-23T08:28:43,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742419_1595 (size=5990) 2024-11-23T08:28:43,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742419_1595 (size=5990) 
2024-11-23T08:28:43,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742419_1595 (size=5990) 2024-11-23T08:28:43,703 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/acl/5c2409a8a8e1fdedb01950f8285b9431/recovered.edits/34.seqid, newMaxSeqId=34, maxSeqId=1 2024-11-23T08:28:43,703 INFO [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=466 B at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/rep_barrier/db9dac5869754b17955925cf28effb84 2024-11-23T08:28:43,704 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:28:43,704 INFO [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. 2024-11-23T08:28:43,704 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5c2409a8a8e1fdedb01950f8285b9431: Waiting for close lock at 1732350523660Running coprocessor pre-close hooks at 1732350523660Disabling compacts and flushes for region at 1732350523660Disabling writes for close at 1732350523660Obtaining lock to block concurrent updates at 1732350523660Preparing flush snapshotting stores in 5c2409a8a8e1fdedb01950f8285b9431 at 1732350523660Finished memstore snapshotting hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431., syncing WAL and waiting on mvcc, flushsize=dataSize=1694, getHeapSize=3976, getOffHeapSize=0, getCellsCount=27 at 1732350523661 (+1 ms)Flushing stores of hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. at 1732350523662 (+1 ms)Flushing 5c2409a8a8e1fdedb01950f8285b9431/l: creating writer at 1732350523662Flushing 5c2409a8a8e1fdedb01950f8285b9431/l: appending metadata at 1732350523683 (+21 ms)Flushing 5c2409a8a8e1fdedb01950f8285b9431/l: closing flushed file at 1732350523683Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@742301ac: reopening flushed file at 1732350523694 (+11 ms)Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 5c2409a8a8e1fdedb01950f8285b9431 in 40ms, sequenceid=31, compaction requested=false at 1732350523700 (+6 ms)Writing region close event to WAL at 1732350523701 (+1 ms)Running coprocessor post-close hooks at 1732350523704 (+3 ms)Closed at 1732350523704 2024-11-23T08:28:43,704 DEBUG [RS_CLOSE_REGION-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1732350110088.5c2409a8a8e1fdedb01950f8285b9431. 
2024-11-23T08:28:43,709 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/table/e808fd0af8ba4058a0ce44d9fe5c018b is 122, key is testtb-testExportFileSystemStateWithSkipTmp,1,1732350467787.19aecca552920d1bec6fe5848db649c4./table:/1732350489451/DeleteFamily/seqid=0 2024-11-23T08:28:43,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742420_1596 (size=6012) 2024-11-23T08:28:43,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742420_1596 (size=6012) 2024-11-23T08:28:43,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742420_1596 (size=6012) 2024-11-23T08:28:43,714 INFO [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=996 B at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/table/e808fd0af8ba4058a0ce44d9fe5c018b 2024-11-23T08:28:43,718 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/info/485df1a3b2e84f3cbb453a390922a8c9 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/info/485df1a3b2e84f3cbb453a390922a8c9 2024-11-23T08:28:43,721 INFO [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/info/485df1a3b2e84f3cbb453a390922a8c9, entries=8, sequenceid=247, filesize=6.2 K 2024-11-23T08:28:43,722 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/ns/9cca5b92ce494e1ba7172ec94caaec7d as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/ns/9cca5b92ce494e1ba7172ec94caaec7d 2024-11-23T08:28:43,726 INFO [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/ns/9cca5b92ce494e1ba7172ec94caaec7d, entries=4, sequenceid=247, filesize=5.8 K 2024-11-23T08:28:43,726 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/rep_barrier/db9dac5869754b17955925cf28effb84 as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/rep_barrier/db9dac5869754b17955925cf28effb84 2024-11-23T08:28:43,729 INFO [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/rep_barrier/db9dac5869754b17955925cf28effb84, entries=4, sequenceid=247, filesize=5.8 K 2024-11-23T08:28:43,730 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/.tmp/table/e808fd0af8ba4058a0ce44d9fe5c018b as hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/table/e808fd0af8ba4058a0ce44d9fe5c018b 2024-11-23T08:28:43,734 INFO [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/table/e808fd0af8ba4058a0ce44d9fe5c018b, entries=6, sequenceid=247, filesize=5.9 K 2024-11-23T08:28:43,735 INFO [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~8.76 KB/8966, heapSize ~14.70 KB/15048, currentSize=0 B/0 for 1588230740 in 75ms, sequenceid=247, compaction requested=false 2024-11-23T08:28:43,738 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/data/hbase/meta/1588230740/recovered.edits/250.seqid, newMaxSeqId=250, maxSeqId=1 2024-11-23T08:28:43,739 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:28:43,739 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T08:28:43,739 INFO [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T08:28:43,739 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732350523660Running coprocessor pre-close hooks at 1732350523660Disabling compacts and flushes for region at 1732350523660Disabling writes for close at 1732350523660Obtaining lock to block concurrent updates at 1732350523660Preparing flush snapshotting stores in 1588230740 at 1732350523660Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=8966, getHeapSize=15048, getOffHeapSize=0, getCellsCount=68 at 1732350523661 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732350523662 (+1 ms)Flushing 1588230740/info: creating writer at 1732350523662Flushing 1588230740/info: appending metadata at 1732350523669 (+7 ms)Flushing 1588230740/info: closing flushed file at 1732350523669Flushing 1588230740/ns: creating writer at 1732350523684 (+15 ms)Flushing 1588230740/ns: appending metadata at 1732350523686 (+2 ms)Flushing 1588230740/ns: closing flushed file at 1732350523686Flushing 1588230740/rep_barrier: creating writer at 1732350523696 (+10 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732350523698 (+2 ms)Flushing 1588230740/rep_barrier: 
closing flushed file at 1732350523698Flushing 1588230740/table: creating writer at 1732350523707 (+9 ms)Flushing 1588230740/table: appending metadata at 1732350523708 (+1 ms)Flushing 1588230740/table: closing flushed file at 1732350523708Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@165f024d: reopening flushed file at 1732350523717 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6eaeb872: reopening flushed file at 1732350523722 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@557ac7c4: reopening flushed file at 1732350523726 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a8368c4: reopening flushed file at 1732350523730 (+4 ms)Finished flush of dataSize ~8.76 KB/8966, heapSize ~14.70 KB/15048, currentSize=0 B/0 for 1588230740 in 75ms, sequenceid=247, compaction requested=false at 1732350523735 (+5 ms)Writing region close event to WAL at 1732350523736 (+1 ms)Running coprocessor post-close hooks at 1732350523739 (+3 ms)Closed at 1732350523739 2024-11-23T08:28:43,739 DEBUG [RS_CLOSE_META-regionserver/b33d9013c60a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T08:28:43,861 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(976): stopping server b33d9013c60a,43417,1732350106962; all regions closed. 2024-11-23T08:28:43,861 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(976): stopping server b33d9013c60a,45199,1732350106787; all regions closed. 2024-11-23T08:28:43,862 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(976): stopping server b33d9013c60a,39649,1732350107049; all regions closed. 2024-11-23T08:28:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741834_1010 (size=21889) 2024-11-23T08:28:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741835_1011 (size=8592) 2024-11-23T08:28:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741835_1011 (size=8592) 2024-11-23T08:28:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741834_1010 (size=21889) 2024-11-23T08:28:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741834_1010 (size=21889) 2024-11-23T08:28:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741835_1011 (size=8592) 2024-11-23T08:28:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741836_1012 (size=104456) 2024-11-23T08:28:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741836_1012 (size=104456) 2024-11-23T08:28:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741836_1012 (size=104456) 2024-11-23T08:28:43,873 DEBUG [RS:2;b33d9013c60a:39649 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/oldWALs 2024-11-23T08:28:43,873 DEBUG [RS:0;b33d9013c60a:45199 {}] 
wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/oldWALs 2024-11-23T08:28:43,873 DEBUG [RS:1;b33d9013c60a:43417 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/oldWALs 2024-11-23T08:28:43,873 INFO [RS:1;b33d9013c60a:43417 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b33d9013c60a%2C43417%2C1732350106962.meta:.meta(num 1732350109688) 2024-11-23T08:28:43,873 INFO [RS:0;b33d9013c60a:45199 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b33d9013c60a%2C45199%2C1732350106787:(num 1732350109305) 2024-11-23T08:28:43,873 INFO [RS:2;b33d9013c60a:39649 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b33d9013c60a%2C39649%2C1732350107049:(num 1732350109305) 2024-11-23T08:28:43,873 DEBUG [RS:2;b33d9013c60a:39649 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:28:43,873 DEBUG [RS:0;b33d9013c60a:45199 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:28:43,873 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T08:28:43,873 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T08:28:43,873 INFO [RS:0;b33d9013c60a:45199 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T08:28:43,873 INFO [RS:2;b33d9013c60a:39649 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T08:28:43,873 INFO [RS:0;b33d9013c60a:45199 {}] hbase.ChoreService(370): Chore service for: regionserver/b33d9013c60a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T08:28:43,873 INFO [RS:2;b33d9013c60a:39649 {}] hbase.ChoreService(370): Chore service for: regionserver/b33d9013c60a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-23T08:28:43,873 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T08:28:43,874 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T08:28:43,874 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T08:28:43,874 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T08:28:43,874 INFO [regionserver/b33d9013c60a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T08:28:43,874 INFO [RS:0;b33d9013c60a:45199 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T08:28:43,874 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T08:28:43,874 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T08:28:43,874 INFO [RS:2;b33d9013c60a:39649 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T08:28:43,874 INFO [regionserver/b33d9013c60a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T08:28:43,874 INFO [RS:0;b33d9013c60a:45199 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45199 2024-11-23T08:28:43,874 INFO [RS:2;b33d9013c60a:39649 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39649 2024-11-23T08:28:43,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741833_1009 (size=13986) 2024-11-23T08:28:43,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073741833_1009 (size=13986) 2024-11-23T08:28:43,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073741833_1009 (size=13986) 2024-11-23T08:28:43,878 DEBUG [RS:1;b33d9013c60a:43417 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/oldWALs 2024-11-23T08:28:43,878 INFO [RS:1;b33d9013c60a:43417 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b33d9013c60a%2C43417%2C1732350106962:(num 1732350109305) 2024-11-23T08:28:43,878 DEBUG [RS:1;b33d9013c60a:43417 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T08:28:43,878 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T08:28:43,878 INFO [RS:1;b33d9013c60a:43417 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T08:28:43,878 INFO [RS:1;b33d9013c60a:43417 {}] hbase.ChoreService(370): Chore service for: regionserver/b33d9013c60a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T08:28:43,878 INFO [RS:1;b33d9013c60a:43417 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T08:28:43,878 INFO [regionserver/b33d9013c60a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T08:28:43,879 INFO [RS:1;b33d9013c60a:43417 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43417 2024-11-23T08:28:43,902 INFO [RS:2;b33d9013c60a:39649 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T08:28:43,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b33d9013c60a,39649,1732350107049 2024-11-23T08:28:43,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T08:28:43,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b33d9013c60a,45199,1732350106787 2024-11-23T08:28:43,911 INFO [RS:0;b33d9013c60a:45199 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T08:28:43,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b33d9013c60a,43417,1732350106962 2024-11-23T08:28:43,911 INFO [RS:1;b33d9013c60a:43417 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T08:28:43,919 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b33d9013c60a,43417,1732350106962] 2024-11-23T08:28:43,936 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b33d9013c60a,43417,1732350106962 already deleted, retry=false 2024-11-23T08:28:43,936 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b33d9013c60a,43417,1732350106962 expired; onlineServers=2 2024-11-23T08:28:43,936 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b33d9013c60a,45199,1732350106787] 2024-11-23T08:28:43,944 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b33d9013c60a,45199,1732350106787 already deleted, retry=false 2024-11-23T08:28:43,944 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b33d9013c60a,45199,1732350106787 expired; onlineServers=1 2024-11-23T08:28:43,944 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b33d9013c60a,39649,1732350107049] 2024-11-23T08:28:43,952 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b33d9013c60a,39649,1732350107049 already deleted, retry=false 2024-11-23T08:28:43,952 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b33d9013c60a,39649,1732350107049 expired; onlineServers=0 2024-11-23T08:28:43,952 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b33d9013c60a,38501,1732350105792' ***** 2024-11-23T08:28:43,952 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T08:28:43,953 INFO [M:0;b33d9013c60a:38501 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T08:28:43,953 INFO [M:0;b33d9013c60a:38501 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T08:28:43,953 DEBUG [M:0;b33d9013c60a:38501 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T08:28:43,953 DEBUG [M:0;b33d9013c60a:38501 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T08:28:43,953 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-23T08:28:43,953 DEBUG [master/b33d9013c60a:0:becomeActiveMaster-HFileCleaner.large.0-1732350108761 {}] cleaner.HFileCleaner(306): Exit Thread[master/b33d9013c60a:0:becomeActiveMaster-HFileCleaner.large.0-1732350108761,5,FailOnTimeoutGroup] 2024-11-23T08:28:43,953 DEBUG [master/b33d9013c60a:0:becomeActiveMaster-HFileCleaner.small.0-1732350108767 {}] cleaner.HFileCleaner(306): Exit Thread[master/b33d9013c60a:0:becomeActiveMaster-HFileCleaner.small.0-1732350108767,5,FailOnTimeoutGroup] 2024-11-23T08:28:43,953 INFO [M:0;b33d9013c60a:38501 {}] hbase.ChoreService(370): Chore service for: master/b33d9013c60a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T08:28:43,954 INFO [M:0;b33d9013c60a:38501 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T08:28:43,954 DEBUG [M:0;b33d9013c60a:38501 {}] master.HMaster(1795): Stopping service threads 2024-11-23T08:28:43,954 INFO [M:0;b33d9013c60a:38501 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T08:28:43,954 INFO [M:0;b33d9013c60a:38501 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T08:28:43,955 INFO [M:0;b33d9013c60a:38501 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T08:28:43,955 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-23T08:28:43,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T08:28:43,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T08:28:43,961 DEBUG [M:0;b33d9013c60a:38501 {}] zookeeper.ZKUtil(347): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T08:28:43,961 WARN [M:0;b33d9013c60a:38501 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T08:28:43,963 INFO [M:0;b33d9013c60a:38501 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/.lastflushedseqids 2024-11-23T08:28:43,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37117 is added to blk_1073742421_1597 (size=325) 2024-11-23T08:28:43,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41035 is added to blk_1073742421_1597 (size=325) 2024-11-23T08:28:43,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073742421_1597 (size=325) 2024-11-23T08:28:43,979 INFO [M:0;b33d9013c60a:38501 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T08:28:43,979 INFO [M:0;b33d9013c60a:38501 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T08:28:43,979 DEBUG [M:0;b33d9013c60a:38501 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T08:28:43,990 INFO [M:0;b33d9013c60a:38501 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T08:28:43,990 DEBUG [M:0;b33d9013c60a:38501 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T08:28:43,990 DEBUG [M:0;b33d9013c60a:38501 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T08:28:43,990 DEBUG [M:0;b33d9013c60a:38501 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T08:28:43,990 INFO [M:0;b33d9013c60a:38501 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=1002.71 KB heapSize=1.17 MB 2024-11-23T08:28:43,991 ERROR [AsyncFSWAL-0-hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData-prefix:b33d9013c60a,38501,1732350105792 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData-prefix:b33d9013c60a,38501,1732350105792,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T08:28:44,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T08:28:44,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39649-0x1016812678f0003, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T08:28:44,020 INFO [RS:2;b33d9013c60a:39649 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T08:28:44,021 INFO [RS:2;b33d9013c60a:39649 {}] regionserver.HRegionServer(1031): Exiting; stopping=b33d9013c60a,39649,1732350107049; zookeeper connection closed. 
2024-11-23T08:28:44,021 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5912cf48 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5912cf48 2024-11-23T08:28:44,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T08:28:44,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T08:28:44,027 INFO [RS:0;b33d9013c60a:45199 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T08:28:44,027 INFO [RS:1;b33d9013c60a:43417 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T08:28:44,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43417-0x1016812678f0002, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T08:28:44,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45199-0x1016812678f0001, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T08:28:44,027 INFO [RS:1;b33d9013c60a:43417 {}] regionserver.HRegionServer(1031): Exiting; stopping=b33d9013c60a,43417,1732350106962; zookeeper connection closed. 2024-11-23T08:28:44,027 INFO [RS:0;b33d9013c60a:45199 {}] regionserver.HRegionServer(1031): Exiting; stopping=b33d9013c60a,45199,1732350106787; zookeeper connection closed. 2024-11-23T08:28:44,028 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@783ab542 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@783ab542 2024-11-23T08:28:44,028 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4cc0da8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4cc0da8 2024-11-23T08:28:44,029 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-23T08:28:44,709 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T08:28:46,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:28:46,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T08:28:46,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T08:28:46,249 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-23T08:28:46,250 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-23T08:28:46,250 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:28:46,251 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-23T08:28:46,251 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T08:28:49,190 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:29:14,710 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b33d9013c60a:38501 236 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 23 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@3cb0d971 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@175ce574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4863 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.CountDownLatch$Sync@f5e1d8b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12678 Waited count: 13370 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@10c2c33d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@60c5f314 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 966 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 97 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 8 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@5291bd21-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:37141}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3127 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4215e6b5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 34105): State: TIMED_WAITING Blocked count: 1 Waited 
count: 50 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 97 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 161 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 Waited count: 97 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 162 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 47466 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3874aade Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 34105): State: TIMED_WAITING Blocked count: 116 Waited count: 2497 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 34105): State: TIMED_WAITING Blocked count: 126 Waited count: 2492 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 34105): State: TIMED_WAITING Blocked count: 106 Waited count: 2498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 34105): State: TIMED_WAITING Blocked count: 104 Waited count: 2509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 34105): State: TIMED_WAITING Blocked count: 122 Waited count: 2489 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 241 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 97 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1684352221-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1684352221-87-acceptor-0@2c7712cc-ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:33705}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1684352221-88): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1684352221-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2e157a8e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@456f4ad1): State: TIMED_WAITING Blocked count: 0 Waited count: 963 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44749): State: TIMED_WAITING Blocked count: 1 Waited count: 50 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 97 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 331 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@19fd1988 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1440 Waited count: 1524 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c25da2f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 482 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 483 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 489 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 482 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1224444869-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1224444869-121-acceptor-0@2f24f579-ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:40381}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (294847031) connection to localhost/127.0.0.1:34105 from jenkins): State: TIMED_WAITING Blocked count: 1434 Waited count: 1434 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp1224444869-122): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 0 Waited count: 2156 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1224444869-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-42ed3c18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3a4628a4): State: TIMED_WAITING Blocked count: 0 Waited count: 962 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 33243): State: TIMED_WAITING Blocked count: 1 Waited count: 50 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 97 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 343 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30b960a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1470 Waited count: 1528 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7d8e926b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 497 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 494 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 481 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 494 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 505 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1562542143-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1562542143-154-acceptor-0@61b11724-ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:39875}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1562542143-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1562542143-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-5d4c4796-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7b2cefe2): State: TIMED_WAITING Blocked count: 0 Waited count: 961 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38549): State: TIMED_WAITING Blocked count: 1 Waited count: 50 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 97 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 3 Waited count: 311 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@142993b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1351 Waited count: 1528 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@66649d3f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 492 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 497 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 495 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 494 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 486 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data3)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2)): State: TIMED_WAITING Blocked count: 24 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 195 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data3/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@49481c24[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@1f8de31c[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@5a2816f9[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60357): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 49 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 241 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 370 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48f2ce70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 
cport:60357):): State: WAITING Blocked count: 1 Waited count: 468 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@251f80b6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 503 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@285ca81f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 245 (LeaseRenewer:jenkins@localhost:34105): State: TIMED_WAITING Blocked count: 13 Waited count: 499 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@55a27548 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 431 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:60357)): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 24 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41b008b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 32 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27f3d6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 6 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 6 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 9 Waited count: 94 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@61bdff66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501): State: WAITING Blocked count: 163 Waited count: 616 Waiting on java.util.concurrent.Semaphore$NonfairSync@67f1acae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501): State: WAITING Blocked count: 84 Waited count: 379 Waiting on java.util.concurrent.Semaphore$NonfairSync@26c59529 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501): State: WAITING Blocked count: 40 Waited count: 10953 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1fb83bea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38501): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11f0a1b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38501): State: WAITING Blocked count: 0 Waited count: 1 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11f0a1b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7b8efbf8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7168457d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f5157d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 3 
Waiting on java.util.concurrent.Semaphore$NonfairSync@3bb8be8e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37e5c33a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 94 Waited count: 7 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;b33d9013c60a:38501): State: TIMED_WAITING Blocked count: 12 Waited count: 4176 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1102/0x00007ff8a8f79478.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 48 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@3084ce89): State: TIMED_WAITING Blocked count: 0 Waited count: 159 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4759 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 65 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 65 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 47506 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 16 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 14 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32b26245 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING 
Blocked count: 8 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1037377 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44763a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 45 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e3ecfad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 509 (LeaseRenewer:jenkins.hfs.1@localhost:34105): State: TIMED_WAITING Blocked count: 13 Waited count: 496 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (LeaseRenewer:jenkins.hfs.0@localhost:34105): State: TIMED_WAITING Blocked count: 13 Waited count: 497 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 512 (LeaseRenewer:jenkins.hfs.2@localhost:34105): State: TIMED_WAITING Blocked count: 13 Waited count: 496 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (region-location-0): State: WAITING Blocked count: 12 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 47357 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 571 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 811 Waiting on java.util.concurrent.ForkJoinPool@15bd722e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 589 (region-location-1): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 590 (region-location-2): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 993 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 926 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1052 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 113 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7243b010 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1142 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1205 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1616 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on 
java.util.TaskQueue@4090a35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1976 (region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2256 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 346 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2685 
(ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 480 Waiting on java.util.concurrent.ForkJoinPool@15bd722e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 5909 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 449 Waiting on java.util.concurrent.ForkJoinPool@15bd722e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6098 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6099 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10272 (AsyncFSWAL-1-hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData-prefix:b33d9013c60a,38501,1732350105792): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a5b3498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10275 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-23T08:29:44,710 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
2024-11-23T08:30:14,710 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
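The two DEBUG records above come from HBaseTestingUtil's FsDatasetAsyncDiskServiceFixer, which reflectively looks up a threadGroup field on Hadoop's FsDatasetAsyncDiskService; on Hadoop releases newer than 3.2.3/3.3.4 that field is gone, so the lookup throws NoSuchFieldException and the fixer just logs it (HBASE-27595). A minimal, self-contained sketch of that failure mode, using a hypothetical stand-in class rather than the real Hadoop type:

import java.lang.reflect.Field;

public class ThreadGroupFieldProbe {
  // Hypothetical stand-in for Hadoop's FsDatasetAsyncDiskService. In newer Hadoop
  // releases the private "threadGroup" field was removed, so the lookup below fails.
  static class AsyncDiskServiceStandIn {
    // private ThreadGroup threadGroup;   // present only in older Hadoop versions
  }

  public static void main(String[] args) {
    try {
      Field f = AsyncDiskServiceStandIn.class.getDeclaredField("threadGroup");
      f.setAccessible(true);
      System.out.println("found " + f);
    } catch (NoSuchFieldException e) {
      // Matches the situation logged above: the field is absent, so the
      // workaround is skipped and only a DEBUG line is written.
      System.out.println("NoSuchFieldException: " + e.getMessage());
    }
  }
}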
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b33d9013c60a:38501 231 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 23 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@3cb0d971 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 30 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@175ce574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 55 Waiting on java.util.concurrent.CountDownLatch$Sync@7d4d51a7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12678 Waited count: 13371 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@10c2c33d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@60c5f314 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 1086 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 8 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@5291bd21-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:37141}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3127 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4215e6b5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 34105): State: TIMED_WAITING Blocked count: 1 Waited 
count: 56 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 181 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 182 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 53394 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3874aade Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 34105): State: TIMED_WAITING Blocked count: 116 Waited count: 2557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 34105): State: TIMED_WAITING Blocked count: 126 Waited count: 2552 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 34105): State: TIMED_WAITING Blocked count: 106 Waited count: 2558 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 34105): State: TIMED_WAITING Blocked count: 104 Waited count: 2569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 34105): State: TIMED_WAITING Blocked count: 122 Waited count: 2550 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 271 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1684352221-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1684352221-87-acceptor-0@2c7712cc-ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:33705}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1684352221-88): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1684352221-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2e157a8e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@456f4ad1): State: TIMED_WAITING Blocked count: 0 Waited count: 1083 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44749): State: TIMED_WAITING Blocked count: 1 Waited count: 56 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 351 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@19fd1988 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1460 Waited count: 1564 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c25da2f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 542 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 543 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 549 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 542 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1224444869-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1224444869-121-acceptor-0@2f24f579-ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:40381}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (294847031) connection to localhost/127.0.0.1:34105 from jenkins): State: TIMED_WAITING Blocked count: 1494 Waited count: 1494 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp1224444869-122): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 0 Waited count: 2216 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1224444869-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-42ed3c18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3a4628a4): State: TIMED_WAITING Blocked count: 0 Waited count: 1082 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 33243): State: TIMED_WAITING Blocked count: 1 Waited count: 56 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 363 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30b960a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1490 Waited count: 1568 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7d8e926b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 554 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 541 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 554 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1562542143-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1562542143-154-acceptor-0@61b11724-ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:39875}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1562542143-155): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1562542143-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-5d4c4796-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7b2cefe2): State: TIMED_WAITING Blocked count: 0 Waited count: 1081 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38549): State: TIMED_WAITING Blocked count: 1 Waited count: 56 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 3 Waited count: 331 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@142993b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1371 Waited count: 1568 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@66649d3f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 552 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 555 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 554 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 546 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data3)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2)): State: TIMED_WAITING Blocked count: 24 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 195 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data3/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@49481c24[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@1f8de31c[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@5a2816f9[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60357): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 271 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 375 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48f2ce70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 
cport:60357):): State: WAITING Blocked count: 1 Waited count: 473 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@251f80b6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 508 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@285ca81f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@55a27548 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 459 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:60357)): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 24 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41b008b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 32 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27f3d6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 9 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@61bdff66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501): State: WAITING Blocked count: 163 Waited count: 616 Waiting on java.util.concurrent.Semaphore$NonfairSync@67f1acae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501): State: WAITING Blocked count: 84 Waited count: 379 Waiting on java.util.concurrent.Semaphore$NonfairSync@26c59529 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501): State: WAITING Blocked count: 40 Waited count: 10953 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1fb83bea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38501): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11f0a1b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11f0a1b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7b8efbf8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7168457d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f5157d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@3bb8be8e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37e5c33a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 94 
Waited count: 7 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;b33d9013c60a:38501): State: TIMED_WAITING Blocked count: 12 Waited count: 4176 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1102/0x00007ff8a8f79478.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 54 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@3084ce89): State: TIMED_WAITING Blocked count: 0 Waited count: 179 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5359 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 65 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 65 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 169 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46886935 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 53509 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 16 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 14 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32b26245 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 8 Waited count: 16 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1037377 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44763a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 45 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e3ecfad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (region-location-0): State: WAITING Blocked count: 12 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 53360 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 571 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 811 Waiting on java.util.concurrent.ForkJoinPool@15bd722e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 589 (region-location-1): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 590 (region-location-2): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 993 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 932 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1052 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 113 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7243b010 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1142 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 
(RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1205 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1616 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@4090a35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1976 (region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2685 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 481 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 5909 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 449 Waiting on java.util.concurrent.ForkJoinPool@15bd722e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6098 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6099 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10272 (AsyncFSWAL-1-hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData-prefix:b33d9013c60a,38501,1732350105792): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a5b3498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10275 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-23T08:30:44,711 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T08:31:14,711 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
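The DEBUG records above report that HBaseTestingUtil's FsDatasetAsyncDiskServiceFixer could not find a "threadGroup" field via reflection, which the message itself attributes to running against a Hadoop release newer than 3.2.3 / 3.3.4 (see HBASE-27595); judging by the DEBUG level and the repeated timestamps, the fixer simply retries periodically and the message is informational rather than a failure. The sketch below is a minimal, hypothetical illustration of that failure mode only: the class name ThreadGroupFieldProbe and the use of java.lang.Thread as the reflection target are stand-ins chosen to reproduce the same exception, not the Hadoop class the fixer actually inspects.

import java.lang.reflect.Field;

public class ThreadGroupFieldProbe {
    // Hypothetical stand-in for the reflective lookup whose failure is logged above;
    // the real fixer inspects a Hadoop HDFS class, not java.lang.Thread.
    public static void main(String[] args) {
        try {
            Field f = Thread.class.getDeclaredField("threadGroup");
            f.setAccessible(true);
            System.out.println("found field: " + f);
        } catch (NoSuchFieldException e) {
            // java.lang.Thread declares no field named "threadGroup", so this branch runs,
            // mirroring the DEBUG lines above: the exception is reported and execution continues.
            System.out.println("NoSuchFieldException: " + e.getMessage());
        }
    }
}
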
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b33d9013c60a:38501 231 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 23 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@3cb0d971 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@175ce574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6062 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 61 Waiting on java.util.concurrent.CountDownLatch$Sync@31d9b499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12678 Waited count: 13372 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@10c2c33d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@60c5f314 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 1206 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 121 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 8 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@5291bd21-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:37141}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3127 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4215e6b5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 34105): State: TIMED_WAITING Blocked count: 1 Waited 
count: 62 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 121 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 201 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 Waited count: 121 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 202 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 59317 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3874aade Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 34105): State: TIMED_WAITING Blocked count: 116 Waited count: 2617 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 34105): State: TIMED_WAITING Blocked count: 126 Waited count: 2612 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 34105): State: TIMED_WAITING Blocked count: 106 Waited count: 2618 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 34105): State: TIMED_WAITING Blocked count: 104 Waited count: 2629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 34105): State: TIMED_WAITING Blocked count: 122 Waited count: 2610 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 301 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 121 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1684352221-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1684352221-87-acceptor-0@2c7712cc-ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:33705}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1684352221-88): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1684352221-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2e157a8e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@456f4ad1): State: TIMED_WAITING Blocked count: 0 Waited count: 1203 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44749): State: TIMED_WAITING Blocked count: 1 Waited count: 62 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 121 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 371 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@19fd1988 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1480 Waited count: 1604 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c25da2f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 602 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 627 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 609 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 617 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1224444869-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1224444869-121-acceptor-0@2f24f579-ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:40381}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (294847031) connection to localhost/127.0.0.1:34105 from jenkins): State: TIMED_WAITING Blocked count: 1554 Waited count: 1554 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp1224444869-122): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 0 Waited count: 2276 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1224444869-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-42ed3c18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3a4628a4): State: TIMED_WAITING Blocked count: 0 Waited count: 1202 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 33243): State: TIMED_WAITING Blocked count: 1 Waited count: 62 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 121 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 383 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30b960a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1510 Waited count: 1608 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7d8e926b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 617 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 614 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 601 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 614 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 625 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1562542143-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1562542143-154-acceptor-0@61b11724-ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:39875}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1562542143-155): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1562542143-156): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-5d4c4796-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7b2cefe2): State: TIMED_WAITING Blocked count: 0 Waited count: 1201 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38549): State: TIMED_WAITING Blocked count: 1 Waited count: 62 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 121 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 3 Waited count: 351 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@142993b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1391 Waited count: 1608 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@66649d3f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 612 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 617 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 615 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 614 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 606 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data3)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2)): State: TIMED_WAITING Blocked count: 24 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 195 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data3/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@49481c24[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@1f8de31c[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@5a2816f9[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60357): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 61 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 301 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 379 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48f2ce70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 
cport:60357):): State: WAITING Blocked count: 1 Waited count: 477 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@251f80b6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 512 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@285ca81f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@55a27548 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:60357)): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 24 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41b008b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 32 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27f3d6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 9 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@61bdff66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501): State: WAITING Blocked count: 163 Waited count: 616 Waiting on java.util.concurrent.Semaphore$NonfairSync@67f1acae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501): State: WAITING Blocked count: 84 Waited count: 379 Waiting on java.util.concurrent.Semaphore$NonfairSync@26c59529 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501): State: WAITING Blocked count: 40 Waited count: 10953 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1fb83bea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38501): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11f0a1b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11f0a1b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7b8efbf8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7168457d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f5157d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@3bb8be8e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37e5c33a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 94 
Waited count: 7 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;b33d9013c60a:38501): State: TIMED_WAITING Blocked count: 12 Waited count: 4176 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1102/0x00007ff8a8f79478.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 60 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@3084ce89): State: TIMED_WAITING Blocked count: 0 Waited count: 199 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5958 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 65 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 65 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 169 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46886935 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 60 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 59511 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 16 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 14 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32b26245 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 8 Waited count: 16 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1037377 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44763a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 45 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e3ecfad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (region-location-0): State: WAITING Blocked count: 12 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 59362 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 571 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 812 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 589 (region-location-1): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 590 (region-location-2): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 993 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 938 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1052 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 113 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7243b010 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1142 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 
(RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1205 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1616 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@4090a35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1976 (region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5909 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 449 Waiting on java.util.concurrent.ForkJoinPool@15bd722e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6098 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6099 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10272 (AsyncFSWAL-1-hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData-prefix:b33d9013c60a,38501,1732350105792): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a5b3498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10275 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 10276 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-23T08:31:44,711 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T08:31:47,209 DEBUG [master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=28, reuseRatio=73.68% 2024-11-23T08:31:47,210 DEBUG [master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-23T08:31:55,254 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T08:32:14,712 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b33d9013c60a:38501 230 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 23 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@3cb0d971 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked 
count: 22 Waited count: 28 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@175ce574 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 36 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6662 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 67 Waiting on 
java.util.concurrent.CountDownLatch$Sync@dfdb43d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12678 Waited count: 13373 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) 
app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@10c2c33d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@60c5f314 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 1326 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 8 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@5291bd21-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:37141}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3127 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4215e6b5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 34105): State: TIMED_WAITING Blocked count: 1 Waited count: 68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 221 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 222 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 65244 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3874aade Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 34105): State: TIMED_WAITING Blocked count: 116 Waited count: 2677 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 34105): State: TIMED_WAITING Blocked count: 126 Waited count: 2672 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 34105): State: TIMED_WAITING Blocked count: 106 Waited count: 2678 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 34105): State: TIMED_WAITING Blocked count: 104 Waited count: 2689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 34105): State: TIMED_WAITING Blocked count: 122 Waited count: 2670 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 331 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1684352221-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1684352221-87-acceptor-0@2c7712cc-ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:33705}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1684352221-88): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1684352221-89): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2e157a8e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@456f4ad1): State: TIMED_WAITING Blocked count: 0 Waited count: 1323 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44749): State: TIMED_WAITING Blocked count: 1 Waited count: 68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 391 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@19fd1988 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1500 Waited count: 1644 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c25da2f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 662 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 701 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44749): State: TIMED_WAITING Blocked count: 0 
Waited count: 669 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1224444869-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1224444869-121-acceptor-0@2f24f579-ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:40381}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (294847031) connection to localhost/127.0.0.1:34105 from jenkins): State: TIMED_WAITING Blocked count: 1614 Waited count: 1614 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp1224444869-122): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 0 Waited count: 2336 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1224444869-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-42ed3c18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3a4628a4): State: TIMED_WAITING Blocked count: 0 Waited count: 1322 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 33243): State: TIMED_WAITING Blocked count: 1 Waited count: 68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 403 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30b960a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1530 Waited count: 1648 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7d8e926b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) 
app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 677 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 674 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 661 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 674 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 685 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1562542143-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1562542143-154-acceptor-0@61b11724-ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:39875}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1562542143-155): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1562542143-156): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-5d4c4796-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7b2cefe2): State: TIMED_WAITING Blocked count: 0 Waited count: 1321 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38549): State: TIMED_WAITING Blocked count: 1 Waited count: 68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 3 Waited count: 371 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@142993b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1411 Waited count: 1648 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@66649d3f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38549): State: TIMED_WAITING 
Blocked count: 0 Waited count: 672 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 677 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 676 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 674 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 666 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data3)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2)): State: TIMED_WAITING Blocked count: 24 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 195 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data3/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4da4adad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@388d3c76 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@49481c24[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@1f8de31c[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@645d2268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@5a2816f9[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60357): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 331 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 383 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48f2ce70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:60357):): State: WAITING Blocked count: 1 Waited count: 481 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@251f80b6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 516 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@285ca81f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@55a27548 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:60357)): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 24 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41b008b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 
(NIOWorkerThread-3): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 32 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27f3d6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 9 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@61bdff66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501): State: WAITING Blocked count: 163 Waited count: 616 Waiting on java.util.concurrent.Semaphore$NonfairSync@67f1acae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501): State: WAITING Blocked count: 84 Waited count: 379 Waiting on java.util.concurrent.Semaphore$NonfairSync@26c59529 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501): State: WAITING Blocked count: 40 Waited count: 10953 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1fb83bea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38501): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11f0a1b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11f0a1b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7b8efbf8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7168457d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f5157d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@3bb8be8e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37e5c33a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 94 Waited count: 7 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;b33d9013c60a:38501): State: TIMED_WAITING Blocked count: 12 Waited count: 4176 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1102/0x00007ff8a8f79478.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@3084ce89): State: TIMED_WAITING Blocked count: 0 Waited count: 219 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6558 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 65 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 65 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 169 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46886935 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65514 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 16 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 14 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32b26245 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 8 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1037377 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44763a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 45 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e3ecfad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (region-location-0): State: WAITING Blocked count: 12 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65364 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 589 (region-location-1): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 590 (region-location-2): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 993 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 944 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1052 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 113 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7243b010 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1142 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1205 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1616 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@4090a35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1976 (region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5909 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6098 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6099 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10272 (AsyncFSWAL-1-hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData-prefix:b33d9013c60a,38501,1732350105792): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a5b3498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10276 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10280 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-23T08:32:44,712 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T08:33:14,712 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T08:33:43,993 DEBUG [M:0;b33d9013c60a:38501 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732350523979Disabling compacts and flushes for region at 1732350523979Disabling writes for close at 1732350523990 (+11 ms)Obtaining lock to block concurrent updates at 1732350523990Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732350523990Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1026776, getHeapSize=1231744, getOffHeapSize=0, getCellsCount=2706 at 1732350523991 (+1 ms)Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1732350823992 (+300001 ms) 2024-11-23T08:33:43,993 WARN [M:0;b33d9013c60a:38501 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4654, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4654, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 19 more 2024-11-23T08:33:43,996 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T08:33:43,999 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-23T08:33:43,999 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-23T08:33:44,000 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/WALs/b33d9013c60a,38501,1732350105792/b33d9013c60a%2C38501%2C1732350105792.1732350107652 2024-11-23T08:33:44,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/WALs/b33d9013c60a,38501,1732350105792/b33d9013c60a%2C38501%2C1732350105792.1732350107652 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T08:33:44,003 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T08:33:44,003 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/WALs/b33d9013c60a,38501,1732350105792/b33d9013c60a%2C38501%2C1732350105792.1732350107652 2024-11-23T08:33:44,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/WALs/b33d9013c60a,38501,1732350105792/b33d9013c60a%2C38501%2C1732350105792.1732350107652 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b33d9013c60a:38501 230 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 23 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@3cb0d971 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 29 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 34 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@175ce574 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7261 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 73 Waiting on java.util.concurrent.CountDownLatch$Sync@662b0efc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) 
app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12678 Waited count: 13374 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@10c2c33d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@60c5f314 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 1446 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 145 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 8 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 
(qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@5291bd21-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:37141}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 38 Waited count: 3127 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4215e6b5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 34105): State: TIMED_WAITING Blocked count: 1 Waited count: 74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 145 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 241 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 Waited count: 145 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 242 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 71172 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3874aade Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 34105): State: TIMED_WAITING Blocked count: 116 Waited count: 2738 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 34105): State: TIMED_WAITING Blocked count: 126 Waited count: 2732 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 34105): State: TIMED_WAITING Blocked count: 106 Waited count: 2739 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 34105): State: TIMED_WAITING Blocked count: 104 Waited count: 2750 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 34105): State: TIMED_WAITING Blocked count: 122 Waited count: 2730 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 361 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 145 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(568031931)): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1684352221-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1684352221-87-acceptor-0@2c7712cc-ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:33705}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1684352221-88): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1684352221-89): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2e157a8e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@456f4ad1): State: TIMED_WAITING Blocked count: 0 Waited count: 1443 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 44749): State: TIMED_WAITING Blocked count: 1 Waited count: 74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 145 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 411 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@19fd1988 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1520 Waited count: 1684 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c25da2f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 723 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 761 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 729 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 44749): State: TIMED_WAITING Blocked count: 0 Waited count: 740 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1224444869-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1224444869-121-acceptor-0@2f24f579-ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:40381}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (294847031) connection to localhost/127.0.0.1:34105 from jenkins): State: TIMED_WAITING Blocked count: 1674 Waited count: 1674 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp1224444869-122): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 0 Waited count: 2396 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1224444869-123): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-42ed3c18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3a4628a4): State: TIMED_WAITING Blocked count: 0 Waited count: 1442 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 129 (IPC Server idle connection scanner for port 33243): State: TIMED_WAITING Blocked count: 1 Waited count: 74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 131 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 145 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 134 (Command processor): State: WAITING Blocked count: 1 Waited count: 423 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30b960a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 135 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1550 Waited count: 1688 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7d8e926b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 127 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 137 (IPC Server handler 0 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 737 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 138 (IPC Server handler 1 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 734 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 2 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 721 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 3 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 734 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 4 on default port 33243): State: TIMED_WAITING Blocked count: 0 Waited count: 745 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1562542143-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007ff8a842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1562542143-154-acceptor-0@61b11724-ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:39875}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1562542143-155): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1562542143-156): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-5d4c4796-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7b2cefe2): State: TIMED_WAITING Blocked count: 0 Waited count: 1441 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 38549): State: TIMED_WAITING Blocked count: 1 Waited count: 74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 145 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 3 Waited count: 391 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@142993b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105): State: TIMED_WAITING Blocked count: 1431 Waited count: 1688 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@66649d3f): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 732 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC 
Server handler 1 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 737 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 734 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 38549): State: TIMED_WAITING Blocked count: 0 Waited count: 726 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1)): State: TIMED_WAITING Blocked count: 17 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data3)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2)): State: TIMED_WAITING Blocked count: 24 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4)): State: TIMED_WAITING Blocked count: 22 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 195 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data3/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4da4adad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@388d3c76 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (java.util.concurrent.ThreadPoolExecutor$Worker@49481c24[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@1f8de31c[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6/current/BP-1830007029-172.17.0.2-1732350100447): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@645d2268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@5a2816f9[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60357): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 73 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 361 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 388 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48f2ce70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:60357):): State: WAITING Blocked count: 1 Waited count: 486 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@251f80b6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 521 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@285ca81f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 99 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@55a27548 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 546 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:60357)): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 24 Waited count: 58 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41b008b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 32 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27f3d6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 9 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b7dbb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@61bdff66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38501): State: WAITING Blocked count: 163 Waited count: 616 Waiting on java.util.concurrent.Semaphore$NonfairSync@67f1acae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38501): State: WAITING Blocked count: 84 Waited count: 379 Waiting on java.util.concurrent.Semaphore$NonfairSync@26c59529 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38501): State: WAITING Blocked count: 40 Waited count: 10953 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1fb83bea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38501): State: WAITING Blocked count: 0 Waited count: 4 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11f0a1b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@11f0a1b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7b8efbf8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7168457d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f5157d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=38501): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@3bb8be8e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37e5c33a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 94 Waited count: 7 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;b33d9013c60a:38501): State: TIMED_WAITING Blocked count: 12 Waited count: 4177 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1433/0x00007ff8a9209b58.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) 
app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 72 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/b33d9013c60a:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@3084ce89): State: TIMED_WAITING Blocked count: 0 Waited count: 239 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 381 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 7157 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 65 Waited count: 5 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 65 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 169 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46886935 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 72 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 71515 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 16 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 14 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 456 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32b26245 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 8 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1037377 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44763a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/b33d9013c60a:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 45 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e3ecfad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (region-location-0): State: WAITING Blocked count: 12 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 71366 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 589 (region-location-1): State: WAITING Blocked count: 3 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 590 (region-location-2): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 993 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 
950 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1052 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 71 Waited count: 113 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7243b010 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1140 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1142 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1203 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1204 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1205 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1255 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1256 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1257 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1616 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@4090a35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1976 (region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1977 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@fbf2065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6098 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6099 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10272 (AsyncFSWAL-1-hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData-prefix:b33d9013c60a,38501,1732350105792): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a5b3498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10280 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 10284 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10285 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1419/0x00007ff8a9206ed8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-23T08:33:44,713 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T08:33:48,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/WALs/b33d9013c60a,38501,1732350105792/b33d9013c60a%2C38501%2C1732350105792.1732350107652 after 4001ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T08:33:48,997 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-11-23T08:33:48,997 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T08:33:48,998 INFO [M:0;b33d9013c60a:38501 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-23T08:33:48,998 INFO [M:0;b33d9013c60a:38501 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38501 2024-11-23T08:33:48,998 INFO [M:0;b33d9013c60a:38501 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T08:33:49,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34105/user/jenkins/test-data/68c97837-54b4-f680-9c32-d5c2f337adc8/MasterData/WALs/b33d9013c60a,38501,1732350105792/b33d9013c60a%2C38501%2C1732350105792.1732350107652 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-11-23T08:33:49,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T08:33:49,158 INFO [M:0;b33d9013c60a:38501 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T08:33:49,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38501-0x1016812678f0000, quorum=127.0.0.1:60357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T08:33:49,165 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@474673d3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T08:33:49,166 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5efaea97{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T08:33:49,166 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T08:33:49,166 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@108f4b55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T08:33:49,167 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2de28195{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,STOPPED} 2024-11-23T08:33:49,169 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
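Editor's note: the RecoverLeaseFSUtils warnings above come from HBase reflectively driving HDFS lease recovery while closing the master WAL; they fail here only because the DFSClient has already been shut down ("Filesystem closed"). A minimal sketch of the same calls made directly is below, assuming a reachable HDFS filesystem; the path is illustrative and not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Illustrative path; the real run used hdfs://localhost:34105/.../MasterData/WALs/... files.
            Path wal = new Path("hdfs://namenode:8020/hbase/MasterData/WALs/example.wal");
            FileSystem fs = wal.getFileSystem(conf);
            if (fs instanceof DistributedFileSystem) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;
                // recoverLease() is the method the stack traces above reach via reflection;
                // it throws "Filesystem closed" once the client has been closed.
                boolean closed = dfs.recoverLease(wal);
                while (!closed) {
                    Thread.sleep(1000L);
                    // isFileClosed() is the follow-up probe seen in the InvocationTargetException trace.
                    closed = dfs.isFileClosed(wal);
                }
            }
        }
    }
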
2024-11-23T08:33:49,169 WARN [BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T08:33:49,169 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T08:33:49,169 WARN [BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1830007029-172.17.0.2-1732350100447 (Datanode Uuid 8691d9da-dea3-44e2-98c0-b3ac4322e349) service to localhost/127.0.0.1:34105 2024-11-23T08:33:49,170 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data5/current/BP-1830007029-172.17.0.2-1732350100447 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T08:33:49,171 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data6/current/BP-1830007029-172.17.0.2-1732350100447 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T08:33:49,171 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T08:33:49,173 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@8fd4906{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T08:33:49,173 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@600292e3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T08:33:49,173 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T08:33:49,173 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1de9333b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T08:33:49,173 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32a76e2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,STOPPED} 2024-11-23T08:33:49,174 WARN [BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T08:33:49,174 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
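Editor's note: the AbstractFSWAL error earlier in this shutdown ("We have waited 5 seconds but the close of async writer doesn't complete") points at the config key hbase.wal.async.wait.on.shutdown.seconds. A minimal sketch of raising it programmatically follows, using the standard HBaseConfiguration API; the value 30 is illustrative, and the same key can equally be set in hbase-site.xml.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // The error above reports a 5 second wait; 30 is an illustrative higher value.
            conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
            System.out.println(conf.getInt("hbase.wal.async.wait.on.shutdown.seconds", 5));
        }
    }
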
2024-11-23T08:33:49,174 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T08:33:49,174 WARN [BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1830007029-172.17.0.2-1732350100447 (Datanode Uuid f41c14d4-e902-43cc-9e0d-b02b9537cf74) service to localhost/127.0.0.1:34105 2024-11-23T08:33:49,175 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data3/current/BP-1830007029-172.17.0.2-1732350100447 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T08:33:49,175 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data4/current/BP-1830007029-172.17.0.2-1732350100447 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T08:33:49,175 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T08:33:49,177 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74bb782c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T08:33:49,177 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ea2aa50{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T08:33:49,177 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T08:33:49,177 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3622d218{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T08:33:49,177 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4521559e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,STOPPED} 2024-11-23T08:33:49,178 WARN [BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T08:33:49,178 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-23T08:33:49,178 WARN [BP-1830007029-172.17.0.2-1732350100447 heartbeating to localhost/127.0.0.1:34105 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1830007029-172.17.0.2-1732350100447 (Datanode Uuid 45c86a91-b353-4da8-bbc2-31f7404968bc) service to localhost/127.0.0.1:34105 2024-11-23T08:33:49,178 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T08:33:49,179 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data1/current/BP-1830007029-172.17.0.2-1732350100447 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T08:33:49,179 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/cluster_130b8e46-9727-6cca-6eb8-fade34477f52/data/data2/current/BP-1830007029-172.17.0.2-1732350100447 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T08:33:49,179 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T08:33:49,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@12351f7e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T08:33:49,185 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T08:33:49,185 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T08:33:49,185 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cd6ab6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T08:33:49,185 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@654c02d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/6335df67-4fc4-0c11-a058-665819c18ae1/hadoop.log.dir/,STOPPED} 2024-11-23T08:33:49,198 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-23T08:33:49,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
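Editor's note: the tail of the log is the normal teardown order for the test's mini cluster: the master's RPC server and ZooKeeper connection close, each DataNode's Jetty contexts and BPServiceActor threads stop, the MiniZK cluster shuts down, and HBaseTestingUtil reports "Minicluster is down". A hedged sketch of the test-side calls that drive this sequence is below, assuming the HBaseTestingUtil class named in the log; method names are the commonly used ones and may differ by HBase version.

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterLifecycleSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            // Starts the DFS, ZooKeeper and HBase mini clusters (the startup half of this log).
            util.startMiniCluster();
            try {
                // ... test body runs against the mini cluster here ...
            } finally {
                // Produces the teardown sequence above, ending with "Minicluster is down".
                util.shutdownMiniCluster();
            }
        }
    }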